From 9dd91fe1fb59530ba2a0d10a604e3603dc7c6645 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Apr 2026 05:54:53 +0000 Subject: [PATCH] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) from 1.42.0 to 1.43.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.42.0...v1.43.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-version: 1.43.0 dependency-type: indirect ... Signed-off-by: dependabot[bot] --- go.mod | 14 +- go.sum | 32 +- .../internal/tracetransform/attribute.go | 1 + .../otlp/otlptrace/otlptracehttp/client.go | 21 +- .../otlp/otlptrace/otlptracehttp/doc.go | 5 + .../otlptracehttp/internal/version.go | 2 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../proto/otlp/common/v1/common.pb.go | 145 ++- .../googleapis/api/annotations/client.pb.go | 926 +++++++++++++----- .../api/annotations/field_behavior.pb.go | 2 +- .../api/annotations/field_info.pb.go | 2 +- .../googleapis/api/annotations/http.pb.go | 2 +- .../googleapis/api/annotations/resource.pb.go | 2 +- .../googleapis/api/annotations/routing.pb.go | 12 +- .../api/distribution/distribution.pb.go | 2 +- .../googleapis/api/httpbody/httpbody.pb.go | 2 +- .../genproto/googleapis/api/label/label.pb.go | 2 +- .../googleapis/api/launch_stage.pb.go | 2 +- .../googleapis/api/metric/metric.pb.go | 2 +- .../api/monitoredres/monitored_resource.pb.go | 2 +- .../genproto/googleapis/rpc/code/code.pb.go | 2 +- .../rpc/errdetails/error_details.pb.go | 10 +- .../googleapis/rpc/status/status.pb.go | 7 +- .../grpc/attributes/attributes.go | 77 +- 
.../grpc/balancer/balancer.go | 32 +- .../grpc/balancer/base/balancer.go | 6 +- .../endpointsharding/endpointsharding.go | 11 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 2 +- .../balancer/leastrequest/leastrequest.go | 2 +- .../grpc/balancer/pickfirst/pickfirst.go | 12 +- .../grpc/balancer/ringhash/ringhash.go | 12 +- .../grpc/balancer/rls/config.go | 2 +- .../balancer/weightedroundrobin/balancer.go | 4 +- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- .../credentials/alts/internal/conn/record.go | 40 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 2 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 2 +- .../grpc_gcp/transport_security_common.pb.go | 2 +- .../google.golang.org/grpc/credentials/tls.go | 18 +- vendor/google.golang.org/grpc/dialoptions.go | 9 +- .../grpc/health/grpc_health_v1/health.pb.go | 2 +- .../health/grpc_health_v1/health_grpc.pb.go | 2 +- .../internal/balancergroup/balancergroup.go | 68 +- .../grpc/internal/envconfig/envconfig.go | 38 +- .../grpc/internal/mem/buffer_pool.go | 338 +++++++ .../internal/proto/grpc_lookup_v1/rls.pb.go | 2 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 2 +- .../proto/grpc_lookup_v1/rls_grpc.pb.go | 2 +- .../grpc/internal/transport/defaults.go | 1 + .../grpc/internal/transport/http2_client.go | 6 +- .../grpc/internal/transport/http2_server.go | 6 +- .../grpc/internal/xds/balancer/balancer.go | 1 - .../xds/balancer/cdsbalancer/cdsbalancer.go | 626 ++++++------ .../balancer/cdsbalancer/cluster_watcher.go | 56 -- .../configbuilder.go | 138 +-- .../configbuilder_childname.go | 63 +- .../xds/balancer/clusterimpl/clusterimpl.go | 83 +- .../xds/balancer/clusterimpl/config.go | 13 +- .../balancer/clustermanager/clustermanager.go | 2 +- .../clusterresolver/clusterresolver.go | 410 -------- .../xds/balancer/clusterresolver/config.go | 160 --- .../xds/balancer/clusterresolver/logging.go | 34 - 
.../clusterresolver/resource_resolver.go | 322 ------ .../clusterresolver/resource_resolver_dns.go | 172 ---- .../clusterresolver/resource_resolver_eds.go | 124 --- .../xds/balancer/outlierdetection/balancer.go | 12 +- .../xds/balancer/priority/balancer.go | 7 +- .../xds/balancer/priority/balancer_child.go | 8 +- .../balancer/priority/balancer_priority.go | 2 +- .../xds/clients/xdsclient/authority.go | 2 +- .../internal/xds/clients/xdsclient/channel.go | 12 +- .../grpc/internal/xds/rbac/matchers.go | 36 +- .../internal/xds/resolver/serviceconfig.go | 57 +- .../internal/xds/resolver/xds_resolver.go | 114 ++- .../grpc/internal/xds/server/conn_wrapper.go | 15 +- .../xds/server/filter_chain_manager.go | 414 ++++++++ .../internal/xds/server/listener_wrapper.go | 63 +- .../grpc/internal/xds/server/routing.go | 198 ++++ .../xdslbregistry/converter/converter.go | 2 +- .../xds/xdsclient/xdsresource/filter_chain.go | 827 +--------------- .../xdsresource/listener_resource_type.go | 30 +- .../xds/xdsclient/xdsresource/matcher.go | 28 - .../xds/xdsclient/xdsresource/metadata.go | 11 +- .../xds/xdsclient/xdsresource/type_lds.go | 24 +- .../xdsclient/xdsresource/unmarshal_eds.go | 9 +- .../xdsclient/xdsresource/unmarshal_lds.go | 342 ++++++- .../xds/xdsdepmgr/xds_dependency_manager.go | 286 ++++-- .../google.golang.org/grpc/mem/buffer_pool.go | 167 +--- .../google.golang.org/grpc/picker_wrapper.go | 4 +- vendor/google.golang.org/grpc/resolver/map.go | 34 + vendor/google.golang.org/grpc/rpc_util.go | 22 +- vendor/google.golang.org/grpc/server.go | 1 + vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/xds/server.go | 78 +- vendor/modules.txt | 20 +- 97 files changed, 3478 insertions(+), 3458 deletions(-) create mode 100644 vendor/google.golang.org/grpc/internal/mem/buffer_pool.go delete mode 100644 vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cluster_watcher.go rename vendor/google.golang.org/grpc/internal/xds/balancer/{clusterresolver 
=> cdsbalancer}/configbuilder.go (73%) rename vendor/google.golang.org/grpc/internal/xds/balancer/{clusterresolver => cdsbalancer}/configbuilder_childname.go (62%) delete mode 100644 vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/clusterresolver.go delete mode 100644 vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/config.go delete mode 100644 vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/logging.go delete mode 100644 vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver.go delete mode 100644 vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_dns.go delete mode 100644 vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_eds.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/server/filter_chain_manager.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/server/routing.go diff --git a/go.mod b/go.mod index 2f0c6dd727..a18758201d 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( go.uber.org/zap v1.27.1 golang.org/x/crypto v0.49.0 golang.org/x/term v0.41.0 - google.golang.org/grpc v1.79.3 + google.golang.org/grpc v1.80.0 google.golang.org/protobuf v1.36.11 gotest.tools v2.2.0+incompatible gotest.tools/v3 v3.5.2 @@ -78,7 +78,7 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect github.com/IBM/sarama v1.45.2 // indirect @@ -277,16 +277,16 @@ require ( 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.42.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.64.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.42.0 // indirect go.opentelemetry.io/otel/metric v1.43.0 // indirect go.opentelemetry.io/otel/sdk v1.43.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect go.opentelemetry.io/otel/trace v1.43.0 // indirect - go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.step.sm/crypto v0.75.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect @@ -307,8 +307,8 @@ require ( gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/api v0.271.0 // indirect google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 1739f86370..0b96d4c155 100644 --- 
a/go.sum +++ b/go.sum @@ -85,8 +85,8 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 h1:DHa2U07rk8syqvCge0QIGMCE1WxGj9njT44GH7zNJLQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ= @@ -814,12 +814,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0 h1:MdK go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.42.0/go.mod h1:RolT8tWtfHcjajEH5wFIZ4Dgh5jpPdFXYV9pTAk/qjc= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.42.0 h1:H7O6RlGOMTizyl3R08Kn5pdM06bnH8oscSj7o11tmLA= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.42.0/go.mod h1:mBFWu/WOVDkWWsR7Tx7h6EpQB8wsv7P0Yrh0Pb7othc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 h1:THuZiwpQZuHPul65w4WcwEnkX2QIuMT+UFoOrygtoJw= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0/go.mod h1:J2pvYM5NGHofZ2/Ru6zw/TNWnEQp5crgyDeSrYpXkAw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 h1:zWWrB1U6nqhS/k6zYB74CjRpuiitRtLLi68VcgmOEto= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0/go.mod h1:2qXPNBX1OVRC0IwOnfo1ljoid+RD0QK3443EaqVlsOU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 h1:uLXP+3mghfMf7XmV4PkGfFhFKuNWoCvvx5wP/wOXo0o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0/go.mod h1:v0Tj04armyT59mnURNUJf7RCKcKzq+lgJs6QSjHjaTc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak= go.opentelemetry.io/otel/exporters/prometheus v0.64.0 h1:g0LRDXMX/G1SEZtK8zl8Chm4K6GBwRkjPKE36LxiTYs= go.opentelemetry.io/otel/exporters/prometheus v0.64.0/go.mod h1:UrgcjnarfdlBDP3GjDIJWe6HTprwSazNjwsI+Ru6hro= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= @@ -834,8 +834,8 @@ go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfC go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= -go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= -go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.opentelemetry.io/proto/otlp 
v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.step.sm/crypto v0.75.0 h1:UAHYD6q6ggYyzLlIKHv1MCUVjZIesXRZpGTlRC/HSHw= @@ -995,8 +995,8 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/api v0.271.0 h1:cIPN4qcUc61jlh7oXu6pwOQqbJW2GqYh5PS6rB2C/JY= google.golang.org/api v0.271.0/go.mod h1:CGT29bhwkbF+i11qkRUJb2KMKqcJ1hdFceEIRd9u64Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1006,15 +1006,15 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= -google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= 
-google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= -google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go 
b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index d9bfd6e176..12e243e042 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -93,6 +93,7 @@ func Value(v attribute.Value) *commonpb.AnyValue { Values: stringSliceValues(v.AsStringSlice()), }, } + case attribute.EMPTY: default: av.Value = &commonpb.AnyValue_StringValue{ StringValue: "INVALID", diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 05cb233431..4ae569ff4b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -32,6 +32,13 @@ import ( const contentTypeProto = "application/x-protobuf" +// maxResponseBodySize is the maximum number of bytes to read from a response +// body. It is set to 4 MiB per the OTLP specification recommendation to +// mitigate excessive memory usage caused by a misconfigured or malicious +// server. If exceeded, the response is treated as a not-retryable error. +// This is a variable to allow tests to override it. +var maxResponseBodySize int64 = 4 * 1024 * 1024 + var gzPool = sync.Pool{ New: func() any { w := gzip.NewWriter(io.Discard) @@ -203,7 +210,11 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc // Success, do not retry. // Read the partial success message, if any. 
var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } if respData.Len() == 0 { @@ -234,7 +245,11 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc // message to be returned. It will help in // debugging the actual issue. var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } respStr := strings.TrimSpace(respData.String()) @@ -259,7 +274,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc func (d *client) newRequest(body []byte) (request, error) { u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} - r, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) + r, err := http.NewRequestWithContext(context.Background(), http.MethodPost, u.String(), http.NoBody) if err != nil { return request{Request: r}, err } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go index 9fea75ad19..85645e118e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go @@ -24,6 +24,11 @@ The value may additionally contain a port and a path. The value should not contain a query string or fragment. 
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's HTTP connection. +OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure] and [WithTLSClientConfig] options. + OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go index c1e93d98c9..3e43f77113 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go @@ -5,4 +5,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot // Version is the current release version of the OpenTelemetry OTLP HTTP trace // exporter in use. -const Version = "1.42.0" +const Version = "1.43.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index d1b43c3ba4..087e95f7b8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. 
func Version() string { - return "1.42.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 1f8d49bc98..304f647637 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -53,6 +53,7 @@ type AnyValue struct { // *AnyValue_ArrayValue // *AnyValue_KvlistValue // *AnyValue_BytesValue + // *AnyValue_StringValueStrindex Value isAnyValue_Value `protobuf_oneof:"value"` } @@ -144,6 +145,13 @@ func (x *AnyValue) GetBytesValue() []byte { return nil } +func (x *AnyValue) GetStringValueStrindex() int32 { + if x, ok := x.GetValue().(*AnyValue_StringValueStrindex); ok { + return x.StringValueStrindex + } + return 0 +} + type isAnyValue_Value interface { isAnyValue_Value() } @@ -176,6 +184,20 @@ type AnyValue_BytesValue struct { BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` } +type AnyValue_StringValueStrindex struct { + // Reference to the string value in ProfilesDictionary.string_table. + // + // Note: This is currently used exclusively in the Profiling signal. + // Implementers of OTLP receivers for signals other than Profiling should + // treat the presence of this value as a non-fatal issue. + // Log an error or warning indicating an unexpected field intended for the + // Profiling signal and process the data as if this value were absent or + // empty, ignoring its semantic content for the non-Profiling signal. 
+ // + // Status: [Development] + StringValueStrindex int32 `protobuf:"varint,8,opt,name=string_value_strindex,json=stringValueStrindex,proto3,oneof"` +} + func (*AnyValue_StringValue) isAnyValue_Value() {} func (*AnyValue_BoolValue) isAnyValue_Value() {} @@ -190,6 +212,8 @@ func (*AnyValue_KvlistValue) isAnyValue_Value() {} func (*AnyValue_BytesValue) isAnyValue_Value() {} +func (*AnyValue_StringValueStrindex) isAnyValue_Value() {} + // ArrayValue is a list of AnyValue messages. We need ArrayValue as a message // since oneof in AnyValue does not allow repeated fields. type ArrayValue struct { @@ -306,9 +330,22 @@ type KeyValue struct { unknownFields protoimpl.UnknownFields // The key name of the pair. + // key_ref MUST NOT be set if key is used. Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // The value of the pair. Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Reference to the string key in ProfilesDictionary.string_table. + // key MUST NOT be set if key_strindex is used. + // + // Note: This is currently used exclusively in the Profiling signal. + // Implementers of OTLP receivers for signals other than Profiling should + // treat the presence of this key as a non-fatal issue. + // Log an error or warning indicating an unexpected field intended for the + // Profiling signal and process the data as if this value were absent or + // empty, ignoring its semantic content for the non-Profiling signal. + // + // Status: [Development] + KeyStrindex int32 `protobuf:"varint,3,opt,name=key_strindex,json=keyStrindex,proto3" json:"key_strindex,omitempty"` } func (x *KeyValue) Reset() { @@ -357,6 +394,13 @@ func (x *KeyValue) GetValue() *AnyValue { return nil } +func (x *KeyValue) GetKeyStrindex() int32 { + if x != nil { + return x.KeyStrindex + } + return 0 +} + // InstrumentationScope is a message representing the instrumentation scope information // such as the fully qualified name and version. 
type InstrumentationScope struct { @@ -543,7 +587,7 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0x96, 0x03, 0x0a, 0x08, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, @@ -565,52 +609,58 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, - 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, - 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, - 0x06, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, - 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b, - 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, - 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 
0x28, 0x0d, 0x52, 0x16, 0x64, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, - 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, - 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, - 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, - 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x15, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x08, + 0x20, 0x01, 
0x28, 0x05, 0x48, 0x00, 0x52, 0x13, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x07, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x22, 0x7e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x22, 0xc7, 0x01, 0x0a, 
0x14, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, + 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x4b, + 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -735,6 +785,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { (*AnyValue_ArrayValue)(nil), (*AnyValue_KvlistValue)(nil), (*AnyValue_BytesValue)(nil), + (*AnyValue_StringValueStrindex)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index f840481726..c3315d52fe 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -167,6 +167,63 @@ func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { return file_google_api_client_proto_rawDescGZIP(), []int{1} } +// The behavior to take when the flow control limit is exceeded. 
+type FlowControlLimitExceededBehaviorProto int32 + +const ( + // Default behavior, system-defined. + FlowControlLimitExceededBehaviorProto_UNSET_BEHAVIOR FlowControlLimitExceededBehaviorProto = 0 + // Stop operation, raise error. + FlowControlLimitExceededBehaviorProto_THROW_EXCEPTION FlowControlLimitExceededBehaviorProto = 1 + // Pause operation until limit clears. + FlowControlLimitExceededBehaviorProto_BLOCK FlowControlLimitExceededBehaviorProto = 2 + // Continue operation, disregard limit. + FlowControlLimitExceededBehaviorProto_IGNORE FlowControlLimitExceededBehaviorProto = 3 +) + +// Enum value maps for FlowControlLimitExceededBehaviorProto. +var ( + FlowControlLimitExceededBehaviorProto_name = map[int32]string{ + 0: "UNSET_BEHAVIOR", + 1: "THROW_EXCEPTION", + 2: "BLOCK", + 3: "IGNORE", + } + FlowControlLimitExceededBehaviorProto_value = map[string]int32{ + "UNSET_BEHAVIOR": 0, + "THROW_EXCEPTION": 1, + "BLOCK": 2, + "IGNORE": 3, + } +) + +func (x FlowControlLimitExceededBehaviorProto) Enum() *FlowControlLimitExceededBehaviorProto { + p := new(FlowControlLimitExceededBehaviorProto) + *p = x + return p +} + +func (x FlowControlLimitExceededBehaviorProto) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FlowControlLimitExceededBehaviorProto) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[2].Descriptor() +} + +func (FlowControlLimitExceededBehaviorProto) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[2] +} + +func (x FlowControlLimitExceededBehaviorProto) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FlowControlLimitExceededBehaviorProto.Descriptor instead. +func (FlowControlLimitExceededBehaviorProto) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + // Required information for every language. 
type CommonLanguageSettings struct { state protoimpl.MessageState @@ -547,8 +604,9 @@ type JavaSettings struct { // Example of a YAML configuration:: // // publishing: - // java_settings: - // library_package: com.google.cloud.pubsub.v1 + // library_settings: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` // Configure the Java class name to use instead of the service's for its // corresponding generated GAPIC client. Keys are fully-qualified @@ -679,6 +737,19 @@ type PhpSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // The package name to use in Php. Clobbers the php_namespace option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.php.package_name" field + // in gapic.yaml. API teams should use the protobuf php_namespace option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // library_settings: + // php_settings: + // library_package: Google\Cloud\PubSub\V1 + LibraryPackage string `protobuf:"bytes,2,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` } func (x *PhpSettings) Reset() { @@ -720,6 +791,13 @@ func (x *PhpSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PhpSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + // Settings for Python client libraries. type PythonSettings struct { state protoimpl.MessageState @@ -997,11 +1075,12 @@ type GoSettings struct { // service names and values are the name to be used for the service client // and call options. 
// - // publishing: + // Example: // - // go_settings: - // renamed_services: - // Publisher: TopicAdmin + // publishing: + // go_settings: + // renamed_services: + // Publisher: TopicAdmin RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -1094,6 +1173,18 @@ type MethodSettings struct { // auto_populated_fields: // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` + // Batching configuration for an API method in client libraries. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.BatchCreateExample + // batching: + // element_count_threshold: 1000 + // request_byte_threshold: 100000000 + // delay_threshold_millis: 10 + Batching *BatchingConfigProto `protobuf:"bytes,4,opt,name=batching,proto3" json:"batching,omitempty"` } func (x *MethodSettings) Reset() { @@ -1149,6 +1240,13 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +func (x *MethodSettings) GetBatching() *BatchingConfigProto { + if x != nil { + return x.Batching + } + return nil +} + // This message is used to configure the generation of a subset of the RPCs in // a service for client libraries. type SelectiveGapicGeneration struct { @@ -1214,6 +1312,257 @@ func (x *SelectiveGapicGeneration) GetGenerateOmittedAsInternal() bool { return false } +// `BatchingConfigProto` defines the batching configuration for an API method. +type BatchingConfigProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The thresholds which trigger a batched request to be sent. 
+ Thresholds *BatchingSettingsProto `protobuf:"bytes,1,opt,name=thresholds,proto3" json:"thresholds,omitempty"` + // The request and response fields used in batching. + BatchDescriptor *BatchingDescriptorProto `protobuf:"bytes,2,opt,name=batch_descriptor,json=batchDescriptor,proto3" json:"batch_descriptor,omitempty"` +} + +func (x *BatchingConfigProto) Reset() { + *x = BatchingConfigProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchingConfigProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchingConfigProto) ProtoMessage() {} + +func (x *BatchingConfigProto) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchingConfigProto.ProtoReflect.Descriptor instead. +func (*BatchingConfigProto) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{13} +} + +func (x *BatchingConfigProto) GetThresholds() *BatchingSettingsProto { + if x != nil { + return x.Thresholds + } + return nil +} + +func (x *BatchingConfigProto) GetBatchDescriptor() *BatchingDescriptorProto { + if x != nil { + return x.BatchDescriptor + } + return nil +} + +// `BatchingSettingsProto` specifies a set of batching thresholds, each of +// which acts as a trigger to send a batch of messages as a request. At least +// one threshold must be positive nonzero. +type BatchingSettingsProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of elements of a field collected into a batch which, if + // exceeded, causes the batch to be sent. 
+ ElementCountThreshold int32 `protobuf:"varint,1,opt,name=element_count_threshold,json=elementCountThreshold,proto3" json:"element_count_threshold,omitempty"` + // The aggregated size of the batched field which, if exceeded, causes the + // batch to be sent. This size is computed by aggregating the sizes of the + // request field to be batched, not of the entire request message. + RequestByteThreshold int64 `protobuf:"varint,2,opt,name=request_byte_threshold,json=requestByteThreshold,proto3" json:"request_byte_threshold,omitempty"` + // The duration after which a batch should be sent, starting from the addition + // of the first message to that batch. + DelayThreshold *durationpb.Duration `protobuf:"bytes,3,opt,name=delay_threshold,json=delayThreshold,proto3" json:"delay_threshold,omitempty"` + // The maximum number of elements collected in a batch that could be accepted + // by server. + ElementCountLimit int32 `protobuf:"varint,4,opt,name=element_count_limit,json=elementCountLimit,proto3" json:"element_count_limit,omitempty"` + // The maximum size of the request that could be accepted by server. + RequestByteLimit int32 `protobuf:"varint,5,opt,name=request_byte_limit,json=requestByteLimit,proto3" json:"request_byte_limit,omitempty"` + // The maximum number of elements allowed by flow control. + FlowControlElementLimit int32 `protobuf:"varint,6,opt,name=flow_control_element_limit,json=flowControlElementLimit,proto3" json:"flow_control_element_limit,omitempty"` + // The maximum size of data allowed by flow control. + FlowControlByteLimit int32 `protobuf:"varint,7,opt,name=flow_control_byte_limit,json=flowControlByteLimit,proto3" json:"flow_control_byte_limit,omitempty"` + // The behavior to take when the flow control limit is exceeded. 
+ FlowControlLimitExceededBehavior FlowControlLimitExceededBehaviorProto `protobuf:"varint,8,opt,name=flow_control_limit_exceeded_behavior,json=flowControlLimitExceededBehavior,proto3,enum=google.api.FlowControlLimitExceededBehaviorProto" json:"flow_control_limit_exceeded_behavior,omitempty"` +} + +func (x *BatchingSettingsProto) Reset() { + *x = BatchingSettingsProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchingSettingsProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchingSettingsProto) ProtoMessage() {} + +func (x *BatchingSettingsProto) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchingSettingsProto.ProtoReflect.Descriptor instead. 
+func (*BatchingSettingsProto) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{14} +} + +func (x *BatchingSettingsProto) GetElementCountThreshold() int32 { + if x != nil { + return x.ElementCountThreshold + } + return 0 +} + +func (x *BatchingSettingsProto) GetRequestByteThreshold() int64 { + if x != nil { + return x.RequestByteThreshold + } + return 0 +} + +func (x *BatchingSettingsProto) GetDelayThreshold() *durationpb.Duration { + if x != nil { + return x.DelayThreshold + } + return nil +} + +func (x *BatchingSettingsProto) GetElementCountLimit() int32 { + if x != nil { + return x.ElementCountLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetRequestByteLimit() int32 { + if x != nil { + return x.RequestByteLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetFlowControlElementLimit() int32 { + if x != nil { + return x.FlowControlElementLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetFlowControlByteLimit() int32 { + if x != nil { + return x.FlowControlByteLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetFlowControlLimitExceededBehavior() FlowControlLimitExceededBehaviorProto { + if x != nil { + return x.FlowControlLimitExceededBehavior + } + return FlowControlLimitExceededBehaviorProto_UNSET_BEHAVIOR +} + +// `BatchingDescriptorProto` specifies the fields of the request message to be +// used for batching, and, optionally, the fields of the response message to be +// used for demultiplexing. +type BatchingDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The repeated field in the request message to be aggregated by batching. + BatchedField string `protobuf:"bytes,1,opt,name=batched_field,json=batchedField,proto3" json:"batched_field,omitempty"` + // A list of the fields in the request message. 
Two requests will be batched + // together only if the values of every field specified in + // `request_discriminator_fields` is equal between the two requests. + DiscriminatorFields []string `protobuf:"bytes,2,rep,name=discriminator_fields,json=discriminatorFields,proto3" json:"discriminator_fields,omitempty"` + // Optional. When present, indicates the field in the response message to be + // used to demultiplex the response into multiple response messages, in + // correspondence with the multiple request messages originally batched + // together. + SubresponseField string `protobuf:"bytes,3,opt,name=subresponse_field,json=subresponseField,proto3" json:"subresponse_field,omitempty"` +} + +func (x *BatchingDescriptorProto) Reset() { + *x = BatchingDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchingDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchingDescriptorProto) ProtoMessage() {} + +func (x *BatchingDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchingDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*BatchingDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{15} +} + +func (x *BatchingDescriptorProto) GetBatchedField() string { + if x != nil { + return x.BatchedField + } + return "" +} + +func (x *BatchingDescriptorProto) GetDiscriminatorFields() []string { + if x != nil { + return x.DiscriminatorFields + } + return nil +} + +func (x *BatchingDescriptorProto) GetSubresponseField() string { + if x != nil { + return x.SubresponseField + } + return "" +} + // Experimental features to be included during client library generation. // These fields will be deprecated once the feature graduates and is enabled // by default. @@ -1242,7 +1591,7 @@ type PythonSettings_ExperimentalFeatures struct { func (x *PythonSettings_ExperimentalFeatures) Reset() { *x = PythonSettings_ExperimentalFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[14] + mi := &file_google_api_client_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1255,7 +1604,7 @@ func (x *PythonSettings_ExperimentalFeatures) String() string { func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[14] + mi := &file_google_api_client_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1320,7 +1669,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[18] + mi := &file_google_api_client_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1333,7 +1682,7 @@ func (x *MethodSettings_LongRunning) String() string { func 
(*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[18] + mi := &file_google_api_client_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1640,173 +1989,241 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, + 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, + 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, + 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, + 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 
0x6b, 0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, - 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, - 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, - 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, - 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, 
- 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 
0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 
0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, - 0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, - 0x65, 0x64, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, - 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, - 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, - 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, - 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, - 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 
0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, - 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, - 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, - 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x73, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 
0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, - 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, - 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, - 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, - 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, - 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, - 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, - 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, - 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, - 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, - 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x3a, 0x43, 0x0a, 
0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, - 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, - 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, - 0xfa, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 
0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, + 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 
0x65, 0x6e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, + 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 
0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, + 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, + 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x08, 0x62, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x62, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x1a, + 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, + 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, + 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, + 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 
0x63, 0x74, + 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, + 0x74, 0x65, 0x64, 0x41, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x22, 0xa8, 0x01, + 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x41, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x74, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x10, 0x62, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x9f, 0x04, 0x0a, 0x15, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 
0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x15, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x42, 0x79, 0x74, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x12, 0x42, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x11, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x79, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x5f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x35, 0x0a, 0x17, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x14, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x42, 0x79, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x81, 0x01, 0x0a, 0x24, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x78, + 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x45, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x20, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x78, 0x63, 0x65, 0x65, 0x64, + 0x65, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x22, 0x9e, 0x01, 0x0a, 0x17, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x31, 0x0a, 0x14, 0x64, + 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x64, 0x69, 0x73, 0x63, 0x72, + 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2b, + 0x0a, 0x11, 0x73, 0x75, 0x62, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x75, 0x62, 0x72, 0x65, + 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2a, 0xa3, 0x01, 0x0a, 0x19, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, + 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, + 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, + 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, + 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, + 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, + 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, + 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, + 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, + 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, + 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, + 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x2a, 0x67, 0x0a, 0x25, 0x46, 0x6c, + 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x78, + 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 
0x0e, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x5f, 0x42, 0x45, 0x48, + 0x41, 0x56, 0x49, 0x4f, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x48, 0x52, 0x4f, 0x57, + 0x5f, 0x45, 0x58, 0x43, 0x45, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, + 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, + 0x45, 0x10, 0x03, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, + 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 
0xab, 0xfa, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1821,76 +2238,85 @@ func file_google_api_client_proto_rawDescGZIP() []byte { return file_google_api_client_proto_rawDescData } -var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 22) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: 
google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration - nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry - (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures - nil, // 17: google.api.DotnetSettings.RenamedServicesEntry - nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry - nil, // 19: google.api.GoSettings.RenamedServicesEntry - (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 21: google.api.LaunchStage - (*durationpb.Duration)(nil), // 22: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions + (FlowControlLimitExceededBehaviorProto)(0), // 2: google.api.FlowControlLimitExceededBehaviorProto + (*CommonLanguageSettings)(nil), // 3: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 4: google.api.ClientLibrarySettings + (*Publishing)(nil), // 5: google.api.Publishing + (*JavaSettings)(nil), // 6: google.api.JavaSettings + (*CppSettings)(nil), // 7: google.api.CppSettings + (*PhpSettings)(nil), // 8: google.api.PhpSettings + (*PythonSettings)(nil), // 9: google.api.PythonSettings + (*NodeSettings)(nil), // 10: google.api.NodeSettings + (*DotnetSettings)(nil), // 11: google.api.DotnetSettings + (*RubySettings)(nil), // 12: google.api.RubySettings + (*GoSettings)(nil), // 13: google.api.GoSettings + (*MethodSettings)(nil), // 14: google.api.MethodSettings + (*SelectiveGapicGeneration)(nil), // 15: google.api.SelectiveGapicGeneration + (*BatchingConfigProto)(nil), // 16: google.api.BatchingConfigProto + (*BatchingSettingsProto)(nil), // 17: google.api.BatchingSettingsProto + 
(*BatchingDescriptorProto)(nil), // 18: google.api.BatchingDescriptorProto + nil, // 19: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 20: google.api.PythonSettings.ExperimentalFeatures + nil, // 21: google.api.DotnetSettings.RenamedServicesEntry + nil, // 22: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 23: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 24: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 25: google.api.LaunchStage + (*durationpb.Duration)(nil), // 26: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 28: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration - 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 
15, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 25, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 6, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 7, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 8, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 9, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 10, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 11, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 12, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 13, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 14, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures - 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 21: 
google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry - 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 35, // [35:35] is the sub-list for method output_type - 35, // [35:35] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 31, // [31:35] is the sub-list for extension extendee - 0, // [0:31] is the sub-list for field type_name + 4, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 19, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 3, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 16: google.api.CppSettings.common:type_name -> 
google.api.CommonLanguageSettings + 3, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 20, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 3, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 21, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 22, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 3, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 23, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 24, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 16, // 28: google.api.MethodSettings.batching:type_name -> google.api.BatchingConfigProto + 17, // 29: google.api.BatchingConfigProto.thresholds:type_name -> google.api.BatchingSettingsProto + 18, // 30: google.api.BatchingConfigProto.batch_descriptor:type_name -> google.api.BatchingDescriptorProto + 26, // 31: google.api.BatchingSettingsProto.delay_threshold:type_name -> google.protobuf.Duration + 2, // 32: google.api.BatchingSettingsProto.flow_control_limit_exceeded_behavior:type_name -> google.api.FlowControlLimitExceededBehaviorProto + 26, // 33: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 26, // 34: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 26, // 35: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> 
google.protobuf.Duration + 27, // 36: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 28, // 37: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 28, // 38: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 28, // 39: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 40, // [40:40] is the sub-list for method output_type + 40, // [40:40] is the sub-list for method input_type + 40, // [40:40] is the sub-list for extension type_name + 36, // [36:40] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -2055,7 +2481,43 @@ func file_google_api_client_proto_init() { return nil } } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchingConfigProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchingSettingsProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchingDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PythonSettings_ExperimentalFeatures); i { case 0: return &v.state @@ -2067,7 +2529,7 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[21].Exporter = func(v interface{}, i 
int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -2085,8 +2547,8 @@ func file_google_api_client_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 2, - NumMessages: 19, + NumEnums: 3, + NumMessages: 22, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 5d583b8660..fc6d27b4ab 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index 53e9dd1e99..b660d02c12 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index d30fcee4ce..998205e180 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 175974a869..ad2a3fbf8b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index b8c4aa71f2..9a83b9636b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -66,9 +66,13 @@ const ( // app_profile_id: profiles/prof_qux // } // -// The routing header consists of one or multiple key-value pairs. Every key -// and value must be percent-encoded, and joined together in the format of -// `key1=value1&key2=value2`. 
+// The routing header consists of one or multiple key-value pairs. The order of +// the key-value pairs is undefined, the order of the `routing_parameters` in +// the `RoutingRule` only matters for the evaluation order of the path +// templates when `field` is the same. See the examples below for more details. +// +// Every key and value in the routing header must be percent-encoded, +// and joined together in the following format: `key1=value1&key2=value2`. // The examples below skip the percent-encoding for readability. // // # Example 1 diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go index bfe10c3931..44ec453ca7 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index d083dde3ed..902ae44983 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go index dc3654d7d9..c2a920b775 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index a69c1d4734..2cbb7b43b2 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go index f2fc30d256..0ef2e9b943 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go index bb6d1dcfc9..d6e274246e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 85a9387f79..d7e7647d88 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index e017ef0714..842a5d9b5f 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -957,17 +957,17 @@ type BadRequest_FieldViolation struct { // In this example, in proto `field` could take one of the following values: // // - `full_name` for a violation in the `full_name` value - // - `email_addresses[1].email` for a violation in the `email` field of the + // - `email_addresses[0].email` for a violation in the `email` field of the // first `email_addresses` message - // - `email_addresses[3].type[2]` for a violation in the second `type` + // - `email_addresses[2].type[1]` for a violation in the second `type` // value in the third `email_addresses` message. // // In JSON, the same values are represented as: // // - `fullName` for a violation in the `fullName` value - // - `emailAddresses[1].email` for a violation in the `email` field of the + // - `emailAddresses[0].email` for a violation in the `email` field of the // first `emailAddresses` message - // - `emailAddresses[3].type[2]` for a violation in the second `type` + // - `emailAddresses[2].type[1]` for a violation in the second `type` // value in the third `emailAddresses` message. Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 06a3f71063..f25a7bcc77 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -127,14 +127,13 @@ var file_google_rpc_status_proto_rawDesc = []byte{ 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x5e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x75, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 52d530d7ad..4c60518c74 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -27,6 +27,8 @@ package attributes import ( "fmt" + "iter" + "maps" "strings" ) @@ -37,37 +39,46 @@ import ( // any) bool', it will be called by (*Attributes).Equal to determine whether // two values with the same key should be considered equal. type Attributes struct { - m map[any]any + parent *Attributes + key, value any } // New returns a new Attributes containing the key/value pair. 
func New(key, value any) *Attributes { - return &Attributes{m: map[any]any{key: value}} + return &Attributes{ + key: key, + value: value, + } } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the -// last value overwrites all previous values for that key. To remove an -// existing key, use a nil value. value should not be modified later. +// last value overwrites all previous values for that key. value should not be +// modified later. +// +// Note that Attributes do not support deletion. Avoid using untyped nil values. +// Since the Value method returns an untyped nil when a key is absent, it is +// impossible to distinguish between a missing key and a key explicitly set to +// an untyped nil. If you need to represent a value being unset, consider +// storing a specific sentinel type or a wrapper struct with a boolean field +// indicating presence. func (a *Attributes) WithValue(key, value any) *Attributes { - if a == nil { - return New(key, value) + return &Attributes{ + parent: a, + key: key, + value: value, } - n := &Attributes{m: make(map[any]any, len(a.m)+1)} - for k, v := range a.m { - n.m[k] = v - } - n.m[key] = value - return n } // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key any) any { - if a == nil { - return nil + for cur := a; cur != nil; cur = cur.parent { + if cur.key == key { + return cur.value + } } - return a.m[key] + return nil } // Equal returns whether a and o are equivalent. 
If 'Equal(o any) bool' is @@ -83,11 +94,15 @@ func (a *Attributes) Equal(o *Attributes) bool { if a == nil || o == nil { return false } - if len(a.m) != len(o.m) { - return false + if a == o { + return true } - for k, v := range a.m { - ov, ok := o.m[k] + m := maps.Collect(o.all()) + lenA := 0 + + for k, v := range a.all() { + lenA++ + ov, ok := m[k] if !ok { // o missing element of a return false @@ -101,7 +116,7 @@ func (a *Attributes) Equal(o *Attributes) bool { return false } } - return true + return lenA == len(m) } // String prints the attribute map. If any key or values throughout the map @@ -110,11 +125,11 @@ func (a *Attributes) String() string { var sb strings.Builder sb.WriteString("{") first := true - for k, v := range a.m { + for k, v := range a.all() { if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + fmt.Fprintf(&sb, "%q: %q ", str(k), str(v)) first = false } sb.WriteString("}") @@ -139,3 +154,21 @@ func str(x any) (s string) { func (a *Attributes) MarshalJSON() ([]byte, error) { return []byte(a.String()), nil } + +// all returns an iterator that yields all key-value pairs in the Attributes +// chain. If a key appears multiple times, only the most recently added value +// is yielded. 
+func (a *Attributes) all() iter.Seq2[any, any] { + return func(yield func(any, any) bool) { + seen := map[any]bool{} + for cur := a; cur != nil; cur = cur.parent { + if seen[cur.key] { + continue + } + if !yield(cur.key, cur.value) { + return + } + seen[cur.key] = true + } + } +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index d08b7ad637..326888ae35 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -33,6 +33,7 @@ import ( estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -46,8 +47,8 @@ var ( ) // Register registers the balancer builder to the balancer map. b.Name -// (lowercased) will be used as the name registered with this builder. If the -// Builder implements ConfigParser, ParseConfig will be called when new service +// will be used as the name registered with this builder. If the Builder +// implements ConfigParser, ParseConfig will be called when new service // configs are received by the resolver, and the result will be provided to the // Balancer in UpdateClientConnState. // @@ -55,12 +56,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - name := strings.ToLower(b.Name()) - if name != b.Name() { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer registered with name %q. 
grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + name := b.Name() + if !envconfig.CaseSensitiveBalancerRegistries { + name = strings.ToLower(name) + if name != b.Name() { + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon. After 2 releases, we will enable the env var by default.", b.Name()) + } } m[name] = b } @@ -78,16 +79,17 @@ func init() { } // Get returns the resolver builder registered with the given name. -// Note that the compare is done in a case-insensitive fashion. +// Note that the compare is done in a case-sensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { - if strings.ToLower(name) != name { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + if !envconfig.CaseSensitiveBalancerRegistries { + lowerName := strings.ToLower(name) + if lowerName != name { + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon. 
After 2 releases, we will enable the env var by default.", name) + } + name = lowerName } - if b, ok := m[strings.ToLower(name)]; ok { + if b, ok := m[name]; ok { return b } return nil diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 4d576876d8..4399ba0140 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -121,8 +121,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc.Connect() } } - for _, a := range b.subConns.Keys() { - sc, _ := b.subConns.Get(a) + for a, sc := range b.subConns.All() { // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { sc.Shutdown() @@ -171,8 +170,7 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for _, addr := range b.subConns.Keys() { - sc, _ := b.subConns.Get(addr) + for addr, sc := range b.subConns.All() { if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 360db08ebc..12479f6985 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -187,8 +187,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState } } // Delete old children that are no longer present. 
- for _, e := range children.Keys() { - child, _ := children.Get(e) + for e, child := range children.All() { if _, ok := newChildren.Get(e); !ok { child.closeLocked() } @@ -212,7 +211,7 @@ func (es *endpointSharding) ResolverError(err error) { es.updateState() }() children := es.children.Load() - for _, child := range children.Values() { + for _, child := range children.All() { child.resolverErrorLocked(err) } } @@ -225,7 +224,7 @@ func (es *endpointSharding) Close() { es.childMu.Lock() defer es.childMu.Unlock() children := es.children.Load() - for _, child := range children.Values() { + for _, child := range children.All() { child.closeLocked() } } @@ -233,7 +232,7 @@ func (es *endpointSharding) Close() { func (es *endpointSharding) ExitIdle() { es.childMu.Lock() defer es.childMu.Unlock() - for _, bw := range es.children.Load().Values() { + for _, bw := range es.children.Load().All() { if !bw.isClosed { bw.child.ExitIdle() } @@ -255,7 +254,7 @@ func (es *endpointSharding) updateState() { children := es.children.Load() childStates := make([]ChildState, 0, children.Len()) - for _, child := range children.Values() { + for _, child := range children.All() { childState := child.childState childStates = append(childStates, childState) childPicker := childState.State.Picker diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 7ad4d6d103..0a0ac51f2e 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index f506074ead..942b7e9676 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.6.0 +// - protoc-gen-go-grpc v1.6.1 // - protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go index c7621eea91..019d1a2cd7 100644 --- a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -180,7 +180,7 @@ func (lrb *leastRequestBalancer) UpdateState(state balancer.State) { } // If endpoints are no longer ready, no need to count their active RPCs. - for _, endpoint := range lrb.endpointRPCCounts.Keys() { + for endpoint := range lrb.endpointRPCCounts.All() { if _, ok := newEndpoints.Get(endpoint); !ok { lrb.endpointRPCCounts.Delete(endpoint) } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index dccd9f0bf3..518a69d573 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -399,14 +399,14 @@ func (b *pickfirstBalancer) startFirstPassLocked() { b.firstPass = true b.numTF = 0 // Reset the connection attempt record for existing SubConns. 
- for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { sd.connectionFailedInFirstPass = false } b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { sd.subConn.Shutdown() } b.subConns = resolver.NewAddressMapV2[*scData]() @@ -506,7 +506,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) newAddrsMap.Set(addr, true) } - for _, oldAddr := range b.subConns.Keys() { + for oldAddr := range b.subConns.All() { if _, ok := newAddrsMap.Get(oldAddr); ok { continue } @@ -520,7 +520,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { b.cancelConnectionTimer() - for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { if sd.subConn != selected.subConn { sd.subConn.Shutdown() } @@ -771,7 +771,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { } // Connect() has been called on all the SubConns. The first pass can be // ended if all the SubConns have reported a failure. - for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { if !sd.connectionFailedInFirstPass { return } @@ -782,7 +782,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. 
- for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } diff --git a/vendor/google.golang.org/grpc/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/balancer/ringhash/ringhash.go index 9ff92ada0f..027b4339bb 100644 --- a/vendor/google.golang.org/grpc/balancer/ringhash/ringhash.go +++ b/vendor/google.golang.org/grpc/balancer/ringhash/ringhash.go @@ -166,7 +166,7 @@ func (b *ringhashBalancer) UpdateState(state balancer.State) { } } - for _, endpoint := range b.endpointStates.Keys() { + for endpoint := range b.endpointStates.All() { if _, ok := endpointsSet.Get(endpoint); ok { continue } @@ -261,9 +261,9 @@ func (b *ringhashBalancer) updatePickerLocked() { // non-deterministic, the list of `endpointState`s must be sorted to // ensure `ExitIdle` is called on the same child, preventing unnecessary // connections. - var endpointStates = make([]*endpointState, b.endpointStates.Len()) - for i, s := range b.endpointStates.Values() { - endpointStates[i] = s + var endpointStates = make([]*endpointState, 0, b.endpointStates.Len()) + for _, s := range b.endpointStates.All() { + endpointStates = append(endpointStates, s) } sort.Slice(endpointStates, func(i, j int) bool { return endpointStates[i].hashKey < endpointStates[j].hashKey @@ -322,7 +322,7 @@ func (b *ringhashBalancer) ExitIdle() { func (b *ringhashBalancer) newPickerLocked() *picker { states := make(map[string]endpointState) hasEndpointConnecting := false - for _, epState := range b.endpointStates.Values() { + for _, epState := range b.endpointStates.All() { // Copy the endpoint state to avoid races, since ring hash // mutates the state, weight and hash key in place. states[epState.hashKey] = *epState @@ -356,7 +356,7 @@ func (b *ringhashBalancer) newPickerLocked() *picker { // failure to failover to the lower priority. 
func (b *ringhashBalancer) aggregatedStateLocked() connectivity.State { var nums [5]int - for _, es := range b.endpointStates.Values() { + for _, es := range b.endpointStates.All() { nums[es.state.ConnectivityState]++ } diff --git a/vendor/google.golang.org/grpc/balancer/rls/config.go b/vendor/google.golang.org/grpc/balancer/rls/config.go index 427cb6b63f..9693c8ba95 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/config.go +++ b/vendor/google.golang.org/grpc/balancer/rls/config.go @@ -265,7 +265,7 @@ func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { return nil, fmt.Errorf("rls: cache_size_bytes must be set to a non-zero value: %+v", rlsProto) } if cacheSizeBytes > maxCacheSize { - logger.Info("rls: cache_size_bytes %v is too large, setting it to: %v", cacheSizeBytes, maxCacheSize) + logger.Infof("rls: cache_size_bytes %v is too large, setting it to: %v", cacheSizeBytes, maxCacheSize) cacheSizeBytes = maxCacheSize } return &lbConfig{ diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go index d7cad7acfe..483b51e84e 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -185,7 +185,7 @@ func (b *wrrBalancer) updateEndpointsLocked(endpoints []resolver.Endpoint) { ew.updateConfig(b.cfg) } - for _, endpoint := range b.endpointToWeight.Keys() { + for endpoint := range b.endpointToWeight.All() { if _, ok := endpointSet.Get(endpoint); ok { // Existing endpoint also in new endpoint list; skip. continue @@ -412,7 +412,7 @@ func (b *wrrBalancer) Close() { b.mu.Unlock() // Ensure any lingering OOB watchers are stopped. 
- for _, ew := range b.endpointToWeight.Values() { + for _, ew := range b.endpointToWeight.All() { if ew.stopORCAListener != nil { ew.stopORCAListener() } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 42c61cf9fe..296123e20d 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go index 329e3691f0..c7fcf5e293 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -27,6 +27,7 @@ import ( "net" core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/internal/mem" ) // ALTSRecordCrypto is the interface for gRPC ALTS record protocol. @@ -62,8 +63,6 @@ const ( altsRecordDefaultLength = 4 * 1024 // 4KiB // Message type value included in ALTS record framing. altsRecordMsgType = uint32(0x06) - // The initial write buffer size. - altsWriteBufferInitialSize = 32 * 1024 // 32KiB // The maximum write buffer size. This *must* be multiple of // altsRecordDefaultLength. 
altsWriteBufferMaxSize = 512 * 1024 // 512KiB @@ -74,9 +73,26 @@ const ( ) var ( - protocols = make(map[string]ALTSRecordFunc) + protocols = make(map[string]ALTSRecordFunc) + writeBufPool *mem.BinaryTieredBufferPool ) +func init() { + pool, err := mem.NewDirtyBinaryTieredBufferPool( + 8, + 12, // Go page size, 4KB + 14, // 16KB (max HTTP/2 frame size used by gRPC) + 15, // 32KB (default buffer size for gRPC) + 16, // 64KB + 17, // 128KB + 19, // 512KB, max write buffer size + ) + if err != nil { + panic(fmt.Sprintf("Failed to create write buffer pool: %v", err)) + } + writeBufPool = pool +} + // RegisterProtocol register a ALTS record encryption protocol. func RegisterProtocol(protocol string, f ALTSRecordFunc) error { if _, ok := protocols[protocol]; ok { @@ -97,9 +113,6 @@ type conn struct { // protected holds data read from the network but have not yet been // decrypted. This data might not compose a complete frame. protected []byte - // writeBuf is a buffer used to contain encrypted frames before being - // written to the network. - writeBuf []byte // nextFrame stores the next frame (in protected buffer) info. nextFrame []byte // overhead is the calculated overhead of each frame. @@ -132,7 +145,6 @@ func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, prot crypto: crypto, payloadLengthLimit: payloadLengthLimit, protected: protectedBuf, - writeBuf: make([]byte, altsWriteBufferInitialSize), nextFrame: protectedBuf, overhead: overhead, } @@ -233,16 +245,16 @@ func (p *conn) Write(b []byte) (n int, err error) { // Calculate the output buffer size with framing and encryption overhead. numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) size := len(b) + numOfFrames*p.overhead - // If writeBuf is too small, increase its size up to the maximum size. 
partialBSize := len(b) if size > altsWriteBufferMaxSize { size = altsWriteBufferMaxSize const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit } - if len(p.writeBuf) < size { - p.writeBuf = make([]byte, size) - } + // Get a writeBuf of the required length. + bufHandle := writeBufPool.Get(size) + defer writeBufPool.Put(bufHandle) + writeBuf := *bufHandle for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { partialBEnd := partialBStart + partialBSize @@ -263,7 +275,7 @@ func (p *conn) Write(b []byte) (n int, err error) { // if any. // 1. Fill in type field. - msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] + msg := writeBuf[writeBufIndex+MsgLenFieldSize:] binary.LittleEndian.PutUint32(msg, altsRecordMsgType) // 2. Encrypt the payload and create a tag if any. @@ -273,12 +285,12 @@ func (p *conn) Write(b []byte) (n int, err error) { } // 3. Fill in the size field. - binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) + binary.LittleEndian.PutUint32(writeBuf[writeBufIndex:], uint32(len(msg))) // 4. Increase writeBufIndex. writeBufIndex += len(buf) + p.overhead } - nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) + nn, err := p.Conn.Write(writeBuf[:writeBufIndex]) if err != nil { // We need to calculate the actual data size that was // written. This means we need to remove header, diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 50428a5488..a9b926cc37 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 2222e6debc..f818df8d5d 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index 44b99ce740..2dee74f16f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.6.0 +// - protoc-gen-go-grpc v1.6.1 // - protoc v5.27.1 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index cc335a3ce9..0e9f95a5bd 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 0bcd16dbbf..a6083c3b03 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -22,7 +22,6 @@ import ( "context" "crypto/tls" "crypto/x509" - "errors" "fmt" "net" "net/url" @@ -52,22 +51,21 @@ func (t TLSInfo) AuthType() string { } // ValidateAuthority validates the provided authority being used to override the -// :authority header by verifying it against the peer certificates. It returns a +// :authority header by verifying it against the peer certificate. It returns a // non-nil error if the validation fails. func (t TLSInfo) ValidateAuthority(authority string) error { - var errs []error host, _, err := net.SplitHostPort(authority) if err != nil { host = authority } - for _, cert := range t.State.PeerCertificates { - var err error - if err = cert.VerifyHostname(host); err == nil { - return nil - } - errs = append(errs, err) + + // Verify authority against the leaf certificate. + if len(t.State.PeerCertificates) == 0 { + // This is not expected to happen as the TLS handshake has already + // completed and should have populated PeerCertificates. + return fmt.Errorf("credentials: no peer certificates found to verify authority %q", host) } - return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...)) + return t.State.PeerCertificates[0].VerifyHostname(host) } // cipherSuiteLookup returns the string version of a TLS cipher suite ID. 
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7a5ac2e7c4..4ec5f9cd09 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -705,10 +705,11 @@ func WithDisableHealthCheck() DialOption { func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ - ReadBufferSize: defaultReadBufSize, - WriteBufferSize: defaultWriteBufSize, - UserAgent: grpcUA, - BufferPool: mem.DefaultBufferPool(), + ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, + SharedWriteBuffer: true, + UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, idleTimeout: 30 * time.Minute, diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 8f7d9f6bbe..dcb98cdbc1 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index e99cd5c838..9e10fdd2eb 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.6.0 +// - protoc-gen-go-grpc v1.6.1 // - protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index cd1a9ed171..8acb94276a 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -338,6 +338,16 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { // closed after timeout. Cleanup work (closing sub-balancer and removing // subconns) will be done after timeout. func (bg *BalancerGroup) Remove(id string) { + bg.removeInternal(id, true) +} + +// RemoveImmediately removes and closes the balancer with id from the group +// immediately. +func (bg *BalancerGroup) RemoveImmediately(id string) { + bg.removeInternal(id, false) +} + +func (bg *BalancerGroup) removeInternal(id string, withCaching bool) { bg.logger.Infof("Removing child policy for child %q", id) bg.outgoingMu.Lock() @@ -356,32 +366,40 @@ func (bg *BalancerGroup) Remove(id string) { // Unconditionally remove the sub-balancer config from the map. 
delete(bg.idToBalancerConfig, id) - if bg.deletedBalancerCache != nil { - if bg.logger.V(2) { - bg.logger.Infof("Adding child policy for child %q to the balancer cache", id) - bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) - } - - bg.deletedBalancerCache.Add(id, sbToRemove, func() { + if withCaching { + if bg.deletedBalancerCache != nil { + if bg.logger.V(2) { + bg.logger.Infof("Adding child policy for child %q to the balancer cache", id) + } + bg.deletedBalancerCache.Add(id, sbToRemove, func() { + if bg.logger.V(2) { + bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + + // A sub-balancer evicted from the timeout cache needs to closed + // and its subConns need to removed, unconditionally. There is a + // possibility that a sub-balancer might be removed (thereby + // moving it to the cache) around the same time that the + // balancergroup is closed, and by the time we get here the + // balancergroup might be closed. Check for `outgoingStarted == + // true` at that point can lead to a leaked sub-balancer. + bg.outgoingMu.Lock() + sbToRemove.stopBalancer() + bg.outgoingMu.Unlock() + bg.cleanupSubConns(sbToRemove) + }) if bg.logger.V(2) { - bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } - - // A sub-balancer evicted from the timeout cache needs to closed - // and its subConns need to removed, unconditionally. There is a - // possibility that a sub-balancer might be removed (thereby - // moving it to the cache) around the same time that the - // balancergroup is closed, and by the time we get here the - // balancergroup might be closed. 
Check for `outgoingStarted == - // true` at that point can lead to a leaked sub-balancer. - bg.outgoingMu.Lock() - sbToRemove.stopBalancer() bg.outgoingMu.Unlock() - bg.cleanupSubConns(sbToRemove) - }) - bg.outgoingMu.Unlock() - return + return + } + + // Fall through to remove the sub-balancer with immediate effect if we are not caching. + if bg.logger.V(2) { + bg.logger.Infof("Child policy for child %q was requested to be cached before eventual removal. No such cache exists. Removing right away.", id) + } } // Remove the sub-balancer with immediate effect if we are not caching. @@ -481,7 +499,7 @@ func (bg *BalancerGroup) ResolverError(err error) { // from map. Delete sc from the map only when state changes to Shutdown. Since // it's just forwarding the action, there's no need for a removeSubConn() // wrapper function. -func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { +func (bg *BalancerGroup) newSubConn(sbw *subBalancerWrapper, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { // NOTE: if balancer with id was already removed, this should also return // error. But since we call balancer.stopBalancer when removing the balancer, this // shouldn't happen. 
@@ -493,12 +511,12 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver var sc balancer.SubConn oldListener := opts.StateListener opts.StateListener = func(state balancer.SubConnState) { bg.updateSubConnState(sc, state, oldListener) } - sc, err := bg.cc.NewSubConn(addrs, opts) + sc, err := sbw.ClientConn.NewSubConn(addrs, opts) if err != nil { bg.incomingMu.Unlock() return nil, err } - bg.scToSubBalancer[sc] = config + bg.scToSubBalancer[sc] = sbw bg.incomingMu.Unlock() return sc, nil } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 7ad6fb44ca..3ae45faa40 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -54,17 +54,16 @@ var ( // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by - // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the - // implementation of A76 is stable, we will flip the default value to false - // in a subsequent release. A final release will remove this environment - // variable, enabling the new behavior unconditionally. - XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true) + // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". A future + // release will remove this environment variable, enabling the new behavior + // unconditionally. + XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", false) // RingHashSetRequestHashKey is set if the ring hash balancer can get the // request hash header by setting the "requestHashHeader" field, according - // to gRFC A76. It can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". 
- RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) + // to gRFC A76. It can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "false". + RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", true) // ALTSHandshakerKeepaliveParams is set if we should add the // KeepaliveParams when dial the ALTS handshaker service. @@ -78,6 +77,14 @@ var ( // - The DNS resolver is being used. EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true) + // CaseSensitiveBalancerRegistries is set if the balancer registry should be + // case-sensitive. This is disabled by default, but can be enabled by setting + // the env variable "GRPC_GO_EXPERIMENTAL_CASE_SENSITIVE_BALANCER_REGISTRIES" + // to "true". + // + // TODO: After 2 releases, we will enable the env var by default. + CaseSensitiveBalancerRegistries = boolFromEnv("GRPC_GO_EXPERIMENTAL_CASE_SENSITIVE_BALANCER_REGISTRIES", false) + // XDSAuthorityRewrite indicates whether xDS authority rewriting is enabled. // This feature is defined in gRFC A81 and is enabled by setting the // environment variable GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE to "true". @@ -89,6 +96,14 @@ var ( // GRPC_EXPERIMENTAL_PF_WEIGHTED_SHUFFLING to "false". PickFirstWeightedShuffling = boolFromEnv("GRPC_EXPERIMENTAL_PF_WEIGHTED_SHUFFLING", true) + // XDSRecoverPanicInResourceParsing indicates whether the xdsclient should + // recover from panics while parsing xDS resources. + // + // This feature can be disabled (e.g. for fuzz testing) by setting the + // environment variable "GRPC_GO_EXPERIMENTAL_XDS_RESOURCE_PANIC_RECOVERY" + // to "false". + XDSRecoverPanicInResourceParsing = boolFromEnv("GRPC_GO_EXPERIMENTAL_XDS_RESOURCE_PANIC_RECOVERY", true) + // DisableStrictPathChecking indicates whether strict path checking is // disabled. 
This feature can be disabled by setting the environment // variable GRPC_GO_EXPERIMENTAL_DISABLE_STRICT_PATH_CHECKING to "true". @@ -104,6 +119,13 @@ var ( // A future release will remove this environment variable, enabling strict // path checking behavior unconditionally. DisableStrictPathChecking = boolFromEnv("GRPC_GO_EXPERIMENTAL_DISABLE_STRICT_PATH_CHECKING", false) + + // EnablePriorityLBChildPolicyCache controls whether the priority balancer + // should cache child balancers that are removed from the LB policy config, + // for a period of 15 minutes. This is disabled by default, but can be + // enabled by setting the env variable + // GRPC_EXPERIMENTAL_ENABLE_PRIORITY_LB_CHILD_POLICY_CACHE to true. + EnablePriorityLBChildPolicyCache = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_PRIORITY_LB_CHILD_POLICY_CACHE", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/mem/buffer_pool.go b/vendor/google.golang.org/grpc/internal/mem/buffer_pool.go new file mode 100644 index 0000000000..c2348a82ef --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/mem/buffer_pool.go @@ -0,0 +1,338 @@ +/* + * + * Copyright 2026 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. 
+package mem + +import ( + "fmt" + "math/bits" + "slices" + "sort" + "sync" +) + +const ( + goPageSize = 4 * 1024 // 4KiB. N.B. this must be a power of 2. +) + +var uintSize = bits.UintSize // use a variable for mocking during tests. + +// bufferPool is a copy of the public bufferPool interface used to avoid +// circular dependencies. +type bufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + // + // The provided pointer must hold a prefix of the buffer obtained via + // BufferPool.Get to ensure the buffer's entire capacity can be re-used. + Put(*[]byte) +} + +// BinaryTieredBufferPool is a buffer pool that uses multiple sub-pools with +// power-of-two sizes. +type BinaryTieredBufferPool struct { + // exponentToNextLargestPoolMap maps a power-of-two exponent (e.g., 12 for + // 4KB) to the index of the next largest sizedBufferPool. This is used by + // Get() to find the smallest pool that can satisfy a request for a given + // size. + exponentToNextLargestPoolMap []int + // exponentToPreviousLargestPoolMap maps a power-of-two exponent to the + // index of the previous largest sizedBufferPool. This is used by Put() + // to return a buffer to the most appropriate pool based on its capacity. + exponentToPreviousLargestPoolMap []int + sizedPools []bufferPool + fallbackPool bufferPool + maxPoolCap int // Optimization: Cache max capacity +} + +// NewBinaryTieredBufferPool returns a BufferPool backed by multiple sub-pools. +// This structure enables O(1) lookup time for Get and Put operations. +// +// The arguments provided are the exponents for the buffer capacities (powers +// of 2), not the raw byte sizes. For example, to create a pool of 16KB buffers +// (2^14 bytes), pass 14 as the argument. 
+func NewBinaryTieredBufferPool(powerOfTwoExponents ...uint8) (*BinaryTieredBufferPool, error) { + return newBinaryTiered(func(size int) bufferPool { + return newSizedBufferPool(size, true) + }, &simpleBufferPool{shouldZero: true}, powerOfTwoExponents...) +} + +// NewDirtyBinaryTieredBufferPool returns a BufferPool backed by multiple +// sub-pools. It is similar to NewBinaryTieredBufferPool but it does not +// initialize the buffers before returning them. +func NewDirtyBinaryTieredBufferPool(powerOfTwoExponents ...uint8) (*BinaryTieredBufferPool, error) { + return newBinaryTiered(func(size int) bufferPool { + return newSizedBufferPool(size, false) + }, &simpleBufferPool{shouldZero: false}, powerOfTwoExponents...) +} + +func newBinaryTiered(sizedPoolFactory func(int) bufferPool, fallbackPool bufferPool, powerOfTwoExponents ...uint8) (*BinaryTieredBufferPool, error) { + slices.Sort(powerOfTwoExponents) + powerOfTwoExponents = slices.Compact(powerOfTwoExponents) + + // Determine the maximum exponent we need to support. This depends on the + // word size (32-bit vs 64-bit). + maxExponent := uintSize - 2 + indexOfNextLargestBit := slices.Repeat([]int{-1}, maxExponent+1) + indexOfPreviousLargestBit := slices.Repeat([]int{-1}, maxExponent+1) + + maxTier := 0 + pools := make([]bufferPool, 0, len(powerOfTwoExponents)) + + for i, exp := range powerOfTwoExponents { + // Allocating slices of size > 2^maxExponent isn't possible on + // maxExponent-bit machines. + if int(exp) > maxExponent { + return nil, fmt.Errorf("mem: allocating slice of size 2^%d is not possible", exp) + } + tierSize := 1 << exp + pools = append(pools, sizedPoolFactory(tierSize)) + maxTier = max(maxTier, tierSize) + + // Map the exact power of 2 to this pool index. + indexOfNextLargestBit[exp] = i + indexOfPreviousLargestBit[exp] = i + } + + // Fill gaps for Get() (Next Largest) + // We iterate backwards. If current is empty, take the value from the right (larger). 
+ for i := maxExponent - 1; i >= 0; i-- { + if indexOfNextLargestBit[i] == -1 { + indexOfNextLargestBit[i] = indexOfNextLargestBit[i+1] + } + } + + // Fill gaps for Put() (Previous Largest) + // We iterate forwards. If current is empty, take the value from the left (smaller). + for i := 1; i <= maxExponent; i++ { + if indexOfPreviousLargestBit[i] == -1 { + indexOfPreviousLargestBit[i] = indexOfPreviousLargestBit[i-1] + } + } + + return &BinaryTieredBufferPool{ + exponentToNextLargestPoolMap: indexOfNextLargestBit, + exponentToPreviousLargestPoolMap: indexOfPreviousLargestBit, + sizedPools: pools, + maxPoolCap: maxTier, + fallbackPool: fallbackPool, + }, nil +} + +// Get returns a buffer with specified length from the pool. +func (b *BinaryTieredBufferPool) Get(size int) *[]byte { + return b.poolForGet(size).Get(size) +} + +func (b *BinaryTieredBufferPool) poolForGet(size int) bufferPool { + if size == 0 || size > b.maxPoolCap { + return b.fallbackPool + } + + // Calculate the exponent of the smallest power of 2 >= size. + // We subtract 1 from size to handle exact powers of 2 correctly. + // + // Examples: + // size=16 (0b10000) -> size-1=15 (0b01111) -> bits.Len=4 -> Pool for 2^4 + // size=17 (0b10001) -> size-1=16 (0b10000) -> bits.Len=5 -> Pool for 2^5 + querySize := uint(size - 1) + poolIdx := b.exponentToNextLargestPoolMap[bits.Len(querySize)] + + return b.sizedPools[poolIdx] +} + +// Put returns a buffer to the pool. +func (b *BinaryTieredBufferPool) Put(buf *[]byte) { + // We pass the capacity of the buffer, and not the size of the buffer here. + // If we did the latter, all buffers would eventually move to the smallest + // pool. + b.poolForPut(cap(*buf)).Put(buf) +} + +func (b *BinaryTieredBufferPool) poolForPut(bCap int) bufferPool { + if bCap == 0 { + return NopBufferPool{} + } + if bCap > b.maxPoolCap { + return b.fallbackPool + } + // Find the pool with the largest capacity <= bCap. + // + // We calculate the exponent of the largest power of 2 <= bCap. 
+ // bits.Len(x) returns the minimum number of bits required to represent x; + // i.e. the number of bits up to and including the most significant bit. + // Subtracting 1 gives the 0-based index of the most significant bit, + // which is the exponent of the largest power of 2 <= bCap. + // + // Examples: + // cap=16 (0b10000) -> Len=5 -> 5-1=4 -> 2^4 + // cap=15 (0b01111) -> Len=4 -> 4-1=3 -> 2^3 + largestPowerOfTwo := bits.Len(uint(bCap)) - 1 + poolIdx := b.exponentToPreviousLargestPoolMap[largestPowerOfTwo] + // The buffer is smaller than the smallest power of 2, discard it. + if poolIdx == -1 { + // Buffer is smaller than our smallest pool bucket. + return NopBufferPool{} + } + return b.sizedPools[poolIdx] +} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a TieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. 
+type sizedBufferPool struct { + pool sync.Pool + defaultSize int + shouldZero bool +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf, ok := p.pool.Get().(*[]byte) + if !ok { + buf := make([]byte, size, p.defaultSize) + return &buf + } + b := *buf + if p.shouldZero { + clear(b[:cap(b)]) + } + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int, zero bool) *sizedBufferPool { + return &sizedBufferPool{ + defaultSize: size, + shouldZero: zero, + } +} + +// TieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type TieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) *TieredBufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s, true) + } + return &TieredBufferPool{ + sizedPools: pools, + fallbackPool: simpleBufferPool{shouldZero: true}, + } +} + +// Get returns a buffer with specified length from the pool. +func (p *TieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +// Put returns a buffer to the pool. 
+func (p *TieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *TieredBufferPool) getPool(size int) bufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool + shouldZero bool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + if p.shouldZero { + clear((*bs)[:cap(*bs)]) + } + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + // If we're going to allocate, round up to the nearest page. This way if + // requests frequently arrive with small variation we don't allocate + // repeatedly if we get unlucky and they increase over time. By default we + // only allocate here if size > 1MiB. Because goPageSize is a power of 2, we + // can round up efficiently. + allocSize := (size + goPageSize - 1) & ^(goPageSize - 1) + + b := make([]byte, size, allocSize) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 99f9a7ae8d..7259525a19 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/lookup/v1/rls.proto diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index ac99b59465..58d0e28c16 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/lookup/v1/rls_config.proto diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 5b34012939..0386dcf5cf 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.6.0 +// - protoc-gen-go-grpc v1.6.1 // - protoc v5.27.1 // source: grpc/lookup/v1/rls.proto diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index bc8ee07474..0b2269a503 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -46,6 +46,7 @@ const ( defaultWriteQuota = 64 * 1024 defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) + upcomingDefaultHeaderListSize = uint32(8 << 10) ) // MaxStreamID is the upper bound for the stream ID before the current diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 37b1acc340..c943503f35 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -871,11 +871,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr, handler s } var sz int64 for _, f := range hdr.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + sz += int64(f.Size()) + if sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false } } + if sz > int64(upcomingDefaultHeaderListSize) { + t.logger.Warningf("Header list size to send (%d bytes) is larger than the upcoming default limit (%d bytes). 
In a future release, this will be restricted to %d bytes.", sz, upcomingDefaultHeaderListSize, upcomingDefaultHeaderListSize) + } return true } for { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index a1a14e14fc..3a8c36e4f9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -940,13 +940,17 @@ func (t *http2Server) checkForHeaderListSize(hf []hpack.HeaderField) bool { } var sz int64 for _, f := range hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + sz += int64(f.Size()) + if sz > int64(*t.maxSendHeaderListSize) { if t.logger.V(logLevel) { t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } } + if sz > int64(upcomingDefaultHeaderListSize) { + t.logger.Warningf("Header list size to send (%d bytes) is larger than the upcoming default limit (%d bytes). 
In a future release, this will be restricted to %d bytes.", sz, upcomingDefaultHeaderListSize, upcomingDefaultHeaderListSize) + } return true } diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/balancer.go b/vendor/google.golang.org/grpc/internal/xds/balancer/balancer.go index af3f999a12..c61fd9dc0b 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/balancer.go @@ -25,7 +25,6 @@ import ( _ "google.golang.org/grpc/internal/xds/balancer/cdsbalancer" // Register the CDS balancer _ "google.golang.org/grpc/internal/xds/balancer/clusterimpl" // Register the xds_cluster_impl balancer _ "google.golang.org/grpc/internal/xds/balancer/clustermanager" // Register the xds_cluster_manager balancer - _ "google.golang.org/grpc/internal/xds/balancer/clusterresolver" // Register the xds_cluster_resolver balancer _ "google.golang.org/grpc/internal/xds/balancer/outlierdetection" // Register the outlier_detection balancer _ "google.golang.org/grpc/internal/xds/balancer/priority" // Register the priority balancer ) diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cdsbalancer.go index c00a81b982..a73d5ff46c 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cdsbalancer.go @@ -25,6 +25,7 @@ import ( "sync/atomic" "unsafe" + "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" @@ -33,34 +34,29 @@ import ( "google.golang.org/grpc/internal/balancer/nop" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" - 
"google.golang.org/grpc/internal/xds/balancer/clusterresolver" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/balancer/outlierdetection" + "google.golang.org/grpc/internal/xds/balancer/priority" "google.golang.org/grpc/internal/xds/xdsclient" "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" + "google.golang.org/grpc/internal/xds/xdsdepmgr" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -const ( - cdsName = "cds_experimental" - aggregateClusterMaxDepth = 16 -) +const cdsName = "cds_experimental" var ( - errBalancerClosed = fmt.Errorf("cds_experimental LB policy is closed") - errExceedsMaxDepth = fmt.Errorf("aggregate cluster graph exceeds max depth (%d)", aggregateClusterMaxDepth) - - // newChildBalancer is a helper function to build a new cluster_resolver - // balancer and will be overridden in unittests. + // newChildBalancer is a helper function to build a new priority balancer + // and will be overridden in unittests. newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { - builder := balancer.Get(clusterresolver.Name) + builder := balancer.Get(priority.Name) if builder == nil { - return nil, fmt.Errorf("xds: no balancer builder with name %v", clusterresolver.Name) + return nil, fmt.Errorf("xds: no balancer builder with name %v", priority.Name) } - // We directly pass the parent clientConn to the underlying - // cluster_resolver balancer because the cdsBalancer does not deal with - // subConns. + // We directly pass the parent clientConn to the underlying priority + // balancer because the cdsBalancer does not deal with subConns. return builder.Build(cc, opts), nil } buildProvider = buildProviderFunc @@ -81,30 +77,28 @@ type bb struct{} // Build creates a new CDS balancer with the ClientConn. 
func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - builder := balancer.Get(clusterresolver.Name) + builder := balancer.Get(priority.Name) if builder == nil { - // Shouldn't happen, registered through imported Cluster Resolver, + // Shouldn't happen, registered through imported Priority builder. Still, // defensive programming. - logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) - return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) + logger.Errorf("%q LB policy is needed but not registered", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", priority.Name)) } parser, ok := builder.(balancer.ConfigParser) if !ok { - // Shouldn't happen, imported Cluster Resolver builder has this method. - logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) - return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) + // Shouldn't happen, imported Priority builder has this method. + logger.Errorf("%q LB policy does not implement a config parser", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", priority.Name)) } - ctx, cancel := context.WithCancel(context.Background()) hi := xdsinternal.NewHandshakeInfo(nil, nil, nil, false) xdsHIPtr := unsafe.Pointer(hi) b := &cdsBalancer{ bOpts: opts, childConfigParser: parser, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, xdsHIPtr: &xdsHIPtr, - watchers: make(map[string]*watcherState), + clusterConfigs: make(map[string]*xdsresource.ClusterResult), + priorityConfigs: make(map[string]*priorityConfig), } b.logger = prefixLogger(b) b.ccw = &ccWrapper{ @@ -168,16 +162,26 @@ type cdsBalancer struct { xdsHIPtr *unsafe.Pointer // Accessed atomically. 
- // The serializer and its cancel func are initialized at build time, and the - // rest of the fields here are only accessed from serializer callbacks (or - // from balancer.Balancer methods, which themselves are guaranteed to be - // mutually exclusive) and hence do not need to be guarded by a mutex. - serializer *grpcsync.CallbackSerializer // Serializes updates from gRPC and xDS client. - serializerCancel context.CancelFunc // Stops the above serializer. - childLB balancer.Balancer // Child policy, built upon resolution of the cluster graph. - xdsClient xdsclient.XDSClient // xDS client to watch Cluster resources. - watchers map[string]*watcherState // Set of watchers and associated state, keyed by cluster name. - lbCfg *lbConfig // Current load balancing configuration. + // All fields below are accessed only from methods implementing the + // balancer.Balancer interface. Since gRPC guarantees that these methods are + // never invoked concurrently, no additional synchronization is required to + // protect access to these fields. + xdsClient xdsclient.XDSClient + childLB balancer.Balancer // Child policy, built upon resolution of the cluster graph. + clusterConfigs map[string]*xdsresource.ClusterResult // Cluster name to the last received result for that cluster. + priorityConfigs map[string]*priorityConfig // Hostname to priority config for that leaf cluster. + lbCfg *lbConfig // Current load balancing configuration. + priorities []*priorityConfig // List of priorities in the order. + unsubscribe func() // For dynamic cluster unsubscription. + isSubscribed bool // True if a dynamic cluster has been subscribed to. + clusterSubscriber xdsdepmgr.ClusterSubscriber // To subscribe to dynamic cluster resource. + xdsLBPolicy internalserviceconfig.BalancerConfig // Stores the locality and endpoint picking policy. + attributes *attributes.Attributes // Attributes from resolver state. 
+ serviceConfig *serviceconfig.ParseResult + // Each new leaf cluster needs a child name generator to reuse child policy + // names. But to make sure the names across leaf clusters doesn't conflict, + // we need a seq ID. This ID is incremented for each new cluster. + childNameGeneratorSeqID uint64 // The certificate providers are cached here to that they can be closed when // a new provider is to be created. @@ -189,8 +193,6 @@ type cdsBalancer struct { // management server, creates appropriate certificate provider plugins, and // updates the HandshakeInfo which is added as an address attribute in // NewSubConn() calls. -// -// Only executed in the context of a serializer callback. func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { // If xdsCredentials are not in use, i.e, the user did not want to get // security configuration from an xDS server, we should not be acting on the @@ -273,24 +275,11 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } -// A convenience method to create a watcher for cluster `name`. It also -// registers the watch with the xDS client, and adds the newly created watcher -// to the list of watchers maintained by the LB policy. -func (b *cdsBalancer) createAndAddWatcherForCluster(name string) { - w := &clusterWatcher{ - name: name, - parent: b, - } - ws := &watcherState{ - watcher: w, - cancelWatch: xdsresource.WatchCluster(b.xdsClient, name, w), - } - b.watchers[name] = ws -} - -// UpdateClientConnState receives the serviceConfig (which contains the -// clusterName to watch for in CDS) and the xdsClient object from the -// xdsResolver. +// UpdateClientConnState receives the serviceConfig, xdsConfig, +// ClusterSubscriber and the xdsClient object from the xdsResolver. If an error +// is encountered, the parent (clustermanager) sets the corresponding cluster’s +// picker to transient_failure. 
Otherwise, the received configuration is +// processed and forwarded to the appropriate child policy. func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.xdsClient == nil { c := xdsclient.FromResolverState(state.ResolverState) @@ -302,6 +291,17 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro } b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig)) + xdsConfig := xdsresource.XDSConfigFromResolverState(state.ResolverState) + if xdsConfig == nil { + b.logger.Warningf("Received balancer config with no xDS config") + return balancer.ErrBadResolverState + } + b.clusterConfigs = xdsConfig.Clusters + b.clusterSubscriber = xdsdepmgr.XDSClusterSubscriberFromResolverState(state.ResolverState) + if b.clusterSubscriber == nil { + b.logger.Warningf("Received balancer config with no cluster subscriber") + return balancer.ErrBadResolverState + } // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not // something that is received on the wire. @@ -315,74 +315,220 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro return balancer.ErrBadResolverState } - // Do nothing and return early if configuration has not changed. - if b.lbCfg != nil && b.lbCfg.ClusterName == lbCfg.ClusterName { + b.lbCfg = lbCfg + b.serviceConfig = state.ResolverState.ServiceConfig + b.attributes = state.ResolverState.Attributes + return b.handleXDSConfigUpdate() +} + +// handleXDSConfigUpdate processes the XDSConfig update from the xDS resolver. +func (b *cdsBalancer) handleXDSConfigUpdate() error { + clusterName := b.lbCfg.ClusterName + + // If the cluster is dynamic and we dont have a subscription yet, create + // one. 
+ if b.lbCfg.IsDynamic && !b.isSubscribed { + b.unsubscribe = b.clusterSubscriber.SubscribeToCluster(clusterName) + b.isSubscribed = true return nil } - b.lbCfg = lbCfg - // Handle the update in a blocking fashion. - errCh := make(chan error, 1) - callback := func(context.Context) { - // A config update with a changed top-level cluster name means that none - // of our old watchers make any sense any more. - b.closeAllWatchers() - - // Create a new watcher for the top-level cluster. Upon resolution, it - // could end up creating more watchers if turns out to be an aggregate - // cluster. - b.createAndAddWatcherForCluster(lbCfg.ClusterName) - errCh <- nil - } - onFailure := func() { - // The call to Schedule returns false *only* if the serializer has been - // closed, which happens only when we receive an update after close. - errCh <- errBalancerClosed - } - b.serializer.ScheduleOr(callback, onFailure) - return <-errCh + clusterUpdate, ok := b.clusterConfigs[clusterName] + if !ok { + // If the cluster is missing from the config, check if it is dynamic. + // For dynamic clusters, the xDS config may be updated before the + // corresponding cluster resource is received. This should never occur + // for static clusters. + if b.lbCfg.IsDynamic { + return nil + } + return b.annotateErrorWithNodeID(fmt.Errorf("did not find the cluster %q in XDSConfig", clusterName)) + } + // If the cluster resource has an error, return the error. + if clusterUpdate.Err != nil { + return clusterUpdate.Err + } + + if err := b.handleSecurityConfig(clusterUpdate.Config.Cluster.SecurityCfg); err != nil { + // If the security config is invalid, for example, if the provider + // instance is not found in the bootstrap config, we need to put the + // channel in transient failure. 
+ return b.annotateErrorWithNodeID(fmt.Errorf("received Cluster resource that contains invalid security config: %v", err)) + + } + return b.handleClusterUpdate() } -// ResolverError handles errors reported by the xdsResolver. -func (b *cdsBalancer) ResolverError(err error) { - b.serializer.TrySchedule(func(context.Context) { - // Missing Listener or RouteConfiguration on the management server - // results in a 'resource not found' error from the xDS resolver. In - // these cases, we should stap watching all of the current clusters - // being watched. - if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { - b.closeAllWatchers() - b.closeChildPolicyAndReportTF(err) - return +// handleClusterUpdate handles a good XDSConfig update from the xDS resolver. +// Builds the child policy config and pushes it down. +func (b *cdsBalancer) handleClusterUpdate() error { + clusterName := b.lbCfg.ClusterName + clusterConfig := b.clusterConfigs[clusterName].Config + + var newPriorities []*priorityConfig + switch clusterConfig.Cluster.ClusterType { + case xdsresource.ClusterTypeEDS, xdsresource.ClusterTypeLogicalDNS: + p := b.updatePriorityConfig(clusterName, &clusterConfig) + newPriorities = append(newPriorities, p) + case xdsresource.ClusterTypeAggregate: + for _, leaf := range clusterConfig.AggregateConfig.LeafClusters { + leafCluster := b.clusterConfigs[leaf] + // Update priority config for leaf clusters. + p := b.updatePriorityConfig(leaf, &leafCluster.Config) + newPriorities = append(newPriorities, p) } - var root string - if b.lbCfg != nil { - root = b.lbCfg.ClusterName + } + b.priorities = newPriorities + + if err := b.updateOutlierDetection(); err != nil { + return b.annotateErrorWithNodeID(fmt.Errorf("failed to correctly update Outlier Detection config %v", err)) + } + + // The LB policy is configured by the root cluster. 
+ if err := json.Unmarshal(clusterConfig.Cluster.LBPolicy, &b.xdsLBPolicy); err != nil { + return b.annotateErrorWithNodeID(fmt.Errorf("error unmarshalling xDS LB Policy: %v", err)) + } + if err := b.updateChildConfig(); err != nil { + return b.annotateErrorWithNodeID(err) + } + return nil +} + +// updateChildConfig builds child policy configuration using endpoint addresses +// returned from the XDSConfig and child policy configuration. +// +// A child policy is created if one doesn't already exist. The newly built +// configuration is then pushed to the child policy. +func (b *cdsBalancer) updateChildConfig() error { + if b.childLB == nil { + childLB, err := newChildBalancer(b.ccw, b.bOpts) + if err != nil { + return fmt.Errorf("failed to create child policy of type %s: %v", priority.Name, err) } - b.onClusterError(root, err) - }) + b.childLB = childLB + } + + childCfgBytes, endpoints, err := buildPriorityConfigJSON(b.priorities, &b.xdsLBPolicy) + if err != nil { + return fmt.Errorf("failed to build child policy config: %v", err) + } + childCfg, err := b.childConfigParser.ParseConfig(childCfgBytes) + if err != nil { + return fmt.Errorf("failed to parse child policy config. This should never happen because the config was generated: %v", err) + } + if b.logger.V(2) { + b.logger.Infof("Built child policy config: %s", pretty.ToJSON(childCfg)) + } + + for i := range endpoints { + for j := range endpoints[i].Addresses { + addr := endpoints[i].Addresses[j] + addr.BalancerAttributes = endpoints[i].Attributes + // BalancerAttributes need to be present in endpoint addresses. This + // temporary workaround is required to make load reporting work + // with the old pickfirst policy which creates SubConns with multiple + // addresses. Since the addresses can be from different localities, + // an Address.BalancerAttribute is used to identify the locality of the + // address used by the transport. This workaround can be removed once + // the old pickfirst is removed. 
+ // See https://github.com/grpc/grpc-go/issues/7339 + endpoints[i].Addresses[j] = addr + } + } + if err := b.childLB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: endpoints, + ServiceConfig: b.serviceConfig, + Attributes: b.attributes, + }, + BalancerConfig: childCfg, + }); err != nil { + return fmt.Errorf("failed to push config to child policy: %v", err) + } + return nil } -// UpdateSubConnState handles subConn updates from gRPC. -func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +// updatePriorityConfig updates the priority configuration for the specified EDS +// or DNS cluster, creating it if it does not already exist. +func (b *cdsBalancer) updatePriorityConfig(clusterName string, clusterConfig *xdsresource.ClusterConfig) *priorityConfig { + name := hostName(clusterName, *clusterConfig.Cluster) + pc, ok := b.priorityConfigs[name] + if !ok { + pc = &priorityConfig{ + childNameGen: newNameGenerator(b.childNameGeneratorSeqID), + } + b.priorityConfigs[name] = pc + // Increment the seq ID for the next new cluster. This is done to make + // sure that the child policy names generated for different clusters + // don't conflict with each other. + b.childNameGeneratorSeqID++ + } + pc.clusterConfig = clusterConfig + return pc } -// Closes all registered cluster watchers and removes them from the internal map. -// -// Only executed in the context of a serializer callback. -func (b *cdsBalancer) closeAllWatchers() { - for name, state := range b.watchers { - state.cancelWatch() - delete(b.watchers, name) +// updateOutlierDetection updates Outlier Detection config for all priorities. 
+func (b *cdsBalancer) updateOutlierDetection() error { + odBuilder := balancer.Get(outlierdetection.Name) + if odBuilder == nil { + // Shouldn't happen, registered through imported Outlier Detection, + // defensive programming. + return fmt.Errorf("%q LB policy is needed but not registered", outlierdetection.Name) + } + + odParser, ok := odBuilder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Outlier Detection builder has this method. + return fmt.Errorf("%q LB policy does not implement a config parser", outlierdetection.Name) } + + for _, p := range b.priorities { + // Update Outlier Detection Config. + odJSON := p.clusterConfig.Cluster.OutlierDetection + if odJSON == nil { + odJSON = json.RawMessage(`{}`) + } + + lbCfg, err := odParser.ParseConfig(odJSON) + if err != nil { + return fmt.Errorf("error parsing Outlier Detection config %v: %v", odJSON, err) + } + + odCfg, ok := lbCfg.(*outlierdetection.LBConfig) + if !ok { + // Shouldn't happen, Parser built at build time with Outlier + // Detection builder pulled from gRPC LB Registry. + return fmt.Errorf("config parser for Outlier Detection returned config with unexpected type %T: %v", lbCfg, lbCfg) + } + p.outlierDetection = *odCfg + } + return nil +} + +// ResolverError handles errors reported by the xdsResolver. +func (b *cdsBalancer) ResolverError(err error) { + // Missing Listener or RouteConfiguration on the management server + // results in a 'resource not found' error from the xDS resolver. In + // these cases, we should report transient failure. + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.closeChildPolicyAndReportTF(err) + return + } + var root string + if b.lbCfg != nil { + root = b.lbCfg.ClusterName + } + b.onClusterError(root, err) +} + +// UpdateSubConnState handles subConn updates from gRPC. 
+func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } // closeChildPolicyAndReportTF closes the child policy, if it exists, and // updates the connectivity state of the channel to TransientFailure with an // error picker. -// -// Only executed in the context of a serializer callback. func (b *cdsBalancer) closeChildPolicyAndReportTF(err error) { if b.childLB != nil { b.childLB.Close() @@ -394,40 +540,35 @@ func (b *cdsBalancer) closeChildPolicyAndReportTF(err error) { }) } -// Close cancels the CDS watch, closes the child policy and closes the -// cdsBalancer. +// Close closes the child policy, unsubscribes to the dynamic cluster, and +// closes the cdsBalancer. func (b *cdsBalancer) Close() { - b.serializer.TrySchedule(func(context.Context) { - b.closeAllWatchers() - - if b.childLB != nil { - b.childLB.Close() - b.childLB = nil - } - if b.cachedRoot != nil { - b.cachedRoot.Close() - } - if b.cachedIdentity != nil { - b.cachedIdentity.Close() - } - b.logger.Infof("Shutdown") - }) - b.serializerCancel() - <-b.serializer.Done() + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil + } + if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + if b.unsubscribe != nil { + b.unsubscribe() + } + b.logger.Infof("Shutdown") } func (b *cdsBalancer) ExitIdle() { - b.serializer.TrySchedule(func(context.Context) { - if b.childLB == nil { - b.logger.Warningf("Received ExitIdle with no child policy") - return - } - // This implementation assumes the child balancer supports - // ExitIdle (but still checks for the interface's existence to - // avoid a panic if not). If the child does not, no subconns - // will be connected. 
- b.childLB.ExitIdle() - }) + if b.childLB == nil { + b.logger.Warningf("Received ExitIdle with no child policy") + return + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. + b.childLB.ExitIdle() } // Node ID needs to be manually added to errors generated in the following @@ -445,105 +586,8 @@ func (b *cdsBalancer) annotateErrorWithNodeID(err error) error { return fmt.Errorf("[xDS node id: %v]: %w", nodeID, err) } -// Handles a good Cluster update from the xDS client. Kicks off the discovery -// mechanism generation process from the top-level cluster and if the cluster -// graph is resolved, generates child policy config and pushes it down. -// -// Only executed in the context of a serializer callback. -func (b *cdsBalancer) onClusterUpdate(name string, update *xdsresource.ClusterUpdate) { - state := b.watchers[name] - if state == nil { - // We are currently not watching this cluster anymore. Return early. - return - } - - b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update)) - - // Update the watchers map with the update for the cluster. - state.lastUpdate = update - - // For an aggregate cluster, always use the security configuration on the - // root cluster. - if name == b.lbCfg.ClusterName { - // Process the security config from the received update before building the - // child policy or forwarding the update to it. We do this because the child - // policy may try to create a new subConn inline. Processing the security - // configuration here and setting up the handshakeInfo will make sure that - // such attempts are handled properly. - if err := b.handleSecurityConfig(update.SecurityCfg); err != nil { - // If the security config is invalid, for example, if the provider - // instance is not found in the bootstrap config, we need to put the - // channel in transient failure. 
- b.onClusterError(name, b.annotateErrorWithNodeID(fmt.Errorf("received Cluster resource contains invalid security config: %v", err))) - return - } - } - - clustersSeen := make(map[string]bool) - dms, ok, err := b.generateDMsForCluster(b.lbCfg.ClusterName, 0, nil, clustersSeen) - if err != nil { - b.onClusterError(b.lbCfg.ClusterName, b.annotateErrorWithNodeID(fmt.Errorf("failed to generate discovery mechanisms: %v", err))) - return - } - if ok { - if len(dms) == 0 { - b.onClusterError(b.lbCfg.ClusterName, b.annotateErrorWithNodeID(fmt.Errorf("aggregate cluster graph has no leaf clusters"))) - return - } - // Child policy is built the first time we resolve the cluster graph. - if b.childLB == nil { - childLB, err := newChildBalancer(b.ccw, b.bOpts) - if err != nil { - b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) - return - } - b.childLB = childLB - b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) - } - - // Prepare the child policy configuration, convert it to JSON, have it - // parsed by the child policy to convert it into service config and push - // an update to it. - childCfg := &clusterresolver.LBConfig{ - DiscoveryMechanisms: dms, - // The LB policy is configured by the root cluster. - XDSLBPolicy: b.watchers[b.lbCfg.ClusterName].lastUpdate.LBPolicy, - } - cfgJSON, err := json.Marshal(childCfg) - if err != nil { - // Shouldn't happen, since we just prepared struct. 
- b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", childCfg) - return - } - - var sc serviceconfig.LoadBalancingConfig - if sc, err = b.childConfigParser.ParseConfig(cfgJSON); err != nil { - b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(cfgJSON), err) - return - } - - ccState := balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), - BalancerConfig: sc, - } - if err := b.childLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) - } - } - // We no longer need the clusters that we did not see in this iteration of - // generateDMsForCluster(). - for cluster, state := range b.watchers { - if !clustersSeen[cluster] { - state.cancelWatch() - delete(b.watchers, cluster) - } - } -} - -// Handles an ambient error Cluster update from the xDS client to not stop -// using the previously seen resource. -// -// Only executed in the context of a serializer callback. +// onClusterAmbientError handles an ambient error, if a childLB already has a +// good update, it should continue using that. func (b *cdsBalancer) onClusterAmbientError(name string, err error) { b.logger.Warningf("Cluster resource %q received ambient error update: %v", name, err) @@ -554,116 +598,14 @@ func (b *cdsBalancer) onClusterAmbientError(name string, err error) { } } -// Handles an error Cluster update from the xDS client to stop using the -// previously seen resource. Propagates the error down to the child policy -// if one exists, and puts the channel in TRANSIENT_FAILURE. -// -// Only executed in the context of a serializer callback. +// onClusterResourceError handles errors to stop using the previously seen +// resource. Propagates the error down to the child policy if one exists, and +// puts the channel in TRANSIENT_FAILURE. 
func (b *cdsBalancer) onClusterResourceError(name string, err error) { b.logger.Warningf("CDS watch for resource %q reported resource error", name) b.closeChildPolicyAndReportTF(err) } -// Generates discovery mechanisms for the cluster graph rooted at `name`. This -// method is called recursively if `name` corresponds to an aggregate cluster, -// with the base case for recursion being a leaf cluster. If a new cluster is -// encountered when traversing the graph, a watcher is created for it. -// -// Inputs: -// - name: name of the cluster to start from -// - depth: recursion depth of the current cluster, starting from root -// - dms: prioritized list of current discovery mechanisms -// - clustersSeen: cluster names seen so far in the graph traversal -// -// Outputs: -// - new prioritized list of discovery mechanisms -// - boolean indicating if traversal of the aggregate cluster graph is -// complete. If false, the above list of discovery mechanisms is ignored. -// - error indicating if any error was encountered as part of the graph -// traversal. If error is non-nil, the other return values are ignored. -// -// Only executed in the context of a serializer callback. -func (b *cdsBalancer) generateDMsForCluster(name string, depth int, dms []clusterresolver.DiscoveryMechanism, clustersSeen map[string]bool) ([]clusterresolver.DiscoveryMechanism, bool, error) { - if depth >= aggregateClusterMaxDepth { - return dms, false, errExceedsMaxDepth - } - - if clustersSeen[name] { - // Discovery mechanism already seen through a different branch. - return dms, true, nil - } - clustersSeen[name] = true - - state, ok := b.watchers[name] - if !ok { - // If we have not seen this cluster so far, create a watcher for it, add - // it to the map, start the watch and return. - b.createAndAddWatcherForCluster(name) - - // And since we just created the watcher, we know that we haven't - // resolved the cluster graph yet. 
- return dms, false, nil - } - - // A watcher exists, but no update has been received yet. - if state.lastUpdate == nil { - return dms, false, nil - } - - var dm clusterresolver.DiscoveryMechanism - cluster := state.lastUpdate - switch cluster.ClusterType { - case xdsresource.ClusterTypeAggregate: - // This boolean is used to track if any of the clusters in the graph is - // not yet completely resolved or returns errors, thereby allowing us to - // traverse as much of the graph as possible (and start the associated - // watches where required) to ensure that clustersSeen contains all - // clusters in the graph that we can traverse to. - missingCluster := false - var err error - for _, child := range cluster.PrioritizedClusterNames { - var ok bool - dms, ok, err = b.generateDMsForCluster(child, depth+1, dms, clustersSeen) - if err != nil || !ok { - missingCluster = true - } - } - return dms, !missingCluster, err - case xdsresource.ClusterTypeEDS: - dm = clusterresolver.DiscoveryMechanism{ - Type: clusterresolver.DiscoveryMechanismTypeEDS, - Cluster: cluster.ClusterName, - EDSServiceName: cluster.EDSServiceName, - MaxConcurrentRequests: cluster.MaxRequests, - LoadReportingServer: cluster.LRSServerConfig, - } - case xdsresource.ClusterTypeLogicalDNS: - dm = clusterresolver.DiscoveryMechanism{ - Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, - Cluster: cluster.ClusterName, - DNSHostname: cluster.DNSHostName, - MaxConcurrentRequests: cluster.MaxRequests, - LoadReportingServer: cluster.LRSServerConfig, - } - } - odJSON := cluster.OutlierDetection - // "In the cds LB policy, if the outlier_detection field is not set in - // the Cluster resource, a "no-op" outlier_detection config will be - // generated in the corresponding DiscoveryMechanism config, with all - // fields unset." - A50 - if odJSON == nil { - // This will pick up top level defaults in Cluster Resolver - // ParseConfig, but sre and fpe will be nil still so still a - // "no-op" config. 
- odJSON = json.RawMessage(`{}`) - } - dm.OutlierDetection = odJSON - - dm.TelemetryLabels = cluster.TelemetryLabels - - return append(dms, dm), true, nil -} - func (b *cdsBalancer) onClusterError(name string, err error) { if b.childLB != nil { b.onClusterAmbientError(name, err) diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cluster_watcher.go deleted file mode 100644 index 355923964c..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/cluster_watcher.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cdsbalancer - -import ( - "context" - - "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" -) - -// clusterWatcher implements the xdsresource.ClusterWatcher interface, and is -// passed to the xDS client as part of the WatchResource() API. -// -// It watches a single cluster and handles callbacks from the xDS client by -// scheduling them on the parent LB policy's serializer. 
-type clusterWatcher struct { - name string - parent *cdsBalancer -} - -func (cw *clusterWatcher) ResourceChanged(u *xdsresource.ClusterUpdate, onDone func()) { - handleUpdate := func(context.Context) { cw.parent.onClusterUpdate(cw.name, u); onDone() } - - cw.parent.serializer.ScheduleOr(handleUpdate, onDone) -} - -func (cw *clusterWatcher) ResourceError(err error, onDone func()) { - handleResourceError := func(context.Context) { cw.parent.onClusterResourceError(cw.name, err); onDone() } - cw.parent.serializer.ScheduleOr(handleResourceError, onDone) -} - -func (cw *clusterWatcher) AmbientError(err error, onDone func()) { - handleError := func(context.Context) { cw.parent.onClusterAmbientError(cw.name, err); onDone() } - cw.parent.serializer.ScheduleOr(handleError, onDone) -} - -// watcherState groups the state associated with a clusterWatcher. -type watcherState struct { - watcher *clusterWatcher // The underlying watcher. - cancelWatch func() // Cancel func to cancel the watch. - lastUpdate *xdsresource.ClusterUpdate // Most recent update received for this cluster. 
-} diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/configbuilder.go similarity index 73% rename from vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/configbuilder.go rename to vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/configbuilder.go index d5675efb1b..54bede8933 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/configbuilder.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/configbuilder.go @@ -16,7 +16,7 @@ * */ -package clusterresolver +package cdsbalancer import ( "encoding/json" @@ -33,30 +33,48 @@ import ( "google.golang.org/grpc/internal/xds/balancer/outlierdetection" "google.golang.org/grpc/internal/xds/balancer/priority" "google.golang.org/grpc/internal/xds/balancer/wrrlocality" + "google.golang.org/grpc/internal/xds/clients" "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" "google.golang.org/grpc/resolver" ) const million = 1000000 -// priorityConfig is config for one priority. For example, if there's an EDS and a -// DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}]. +// priorityConfig is config for one priority. For example, if there's an EDS and +// a DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}]. // -// Each priorityConfig corresponds to one discovery mechanism from the LBConfig -// generated by the CDS balancer. The CDS balancer resolves the cluster name to -// an ordered list of discovery mechanisms (if the top cluster is an aggregated -// cluster), one for each underlying cluster. +// Each priorityConfig corresponds to one leaf cluster retrieved from XDSConfig +// for the top-level cluster. type priorityConfig struct { - mechanism DiscoveryMechanism - // edsResp is set only if type is EDS. - edsResp xdsresource.EndpointsUpdate - // endpoints is set only if type is DNS. 
- endpoints []resolver.Endpoint - // Each discovery mechanism has a name generator so that the child policies - // can reuse names between updates (EDS updates for example). + // clusterConfig has the cluster update as well as EDS or DNS endpoints + // depending on the leaf cluster type. + clusterConfig *xdsresource.ClusterConfig + // outlierDetection is the Outlier Detection LB configuration for this + // priority. + outlierDetection outlierdetection.LBConfig + // Each leaf cluster has a name generator so that the child policies can + // reuse names between updates (EDS updates for example). childNameGen *nameGenerator } +// hostName returns the name of the host for the given cluster. +// +// For EDS, it's the EDSServiceName (or ClusterName if empty). +// For DNS, it's the DNSHostName. +func hostName(clusterName string, update xdsresource.ClusterUpdate) string { + switch update.ClusterType { + case xdsresource.ClusterTypeEDS: + if update.EDSServiceName != "" { + return update.EDSServiceName + } + return clusterName + case xdsresource.ClusterTypeLogicalDNS: + return update.DNSHostName + default: + return "" + } +} + // buildPriorityConfigJSON builds balancer config for the passed in // priorities. 
// @@ -73,7 +91,7 @@ type priorityConfig struct { // ┌──────▼─────┐ ┌─────▼──────┐ // │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) // └────────────┘ └────────────┘ -func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Endpoint, error) { +func buildPriorityConfigJSON(priorities []*priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Endpoint, error) { pc, endpoints, err := buildPriorityConfig(priorities, xdsLBPolicy) if err != nil { return nil, nil, fmt.Errorf("failed to build priority config: %v", err) @@ -85,21 +103,22 @@ func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internals return ret, endpoints, nil } -func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Endpoint, error) { +func buildPriorityConfig(priorities []*priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Endpoint, error) { var ( retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} retEndpoints []resolver.Endpoint ) for _, p := range priorities { - switch p.mechanism.Type { - case DiscoveryMechanismTypeEDS: - names, configs, endpoints, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) + clusterUpdate := p.clusterConfig.Cluster + switch clusterUpdate.ClusterType { + case xdsresource.ClusterTypeEDS: + names, configs, endpoints, err := buildClusterImplConfigForEDS(p.childNameGen, p.clusterConfig, xdsLBPolicy) if err != nil { return nil, nil, err } retConfig.Priorities = append(retConfig.Priorities, names...) retEndpoints = append(retEndpoints, endpoints...) 
- odCfgs := convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) + odCfgs := convertClusterImplMapToOutlierDetection(configs, p.outlierDetection) for n, c := range odCfgs { retConfig.Children[n] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, @@ -108,11 +127,11 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi } } continue - case DiscoveryMechanismTypeLogicalDNS: - name, config, endpoints := buildClusterImplConfigForDNS(p.childNameGen, p.endpoints, p.mechanism) + case xdsresource.ClusterTypeLogicalDNS: + name, config, endpoints := buildClusterImplConfigForDNS(p.childNameGen, p.clusterConfig, xdsLBPolicy) retConfig.Priorities = append(retConfig.Priorities, name) retEndpoints = append(retEndpoints, endpoints...) - odCfg := makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) + odCfg := makeClusterImplOutlierDetectionChild(config, p.outlierDetection) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, // Not ignore re-resolution from DNS children, they will trigger @@ -139,27 +158,35 @@ func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg out return &odCfgRet } -func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoint, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Endpoint) { - // Endpoint picking policy for DNS is hardcoded to pick_first. 
- const childPolicy = "pick_first" - retEndpoints := make([]resolver.Endpoint, len(endpoints)) +func buildClusterImplConfigForDNS(g *nameGenerator, config *xdsresource.ClusterConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (string, *clusterimpl.LBConfig, []resolver.Endpoint) { pName := fmt.Sprintf("priority-%v", g.prefix) - for i, e := range endpoints { - // For Logical DNS clusters, the same hostname attribute is added - // to all endpoints. It is set to the name that is resolved for the - // Logical DNS cluster, including the port number. - retEndpoints[i] = xdsresource.SetHostname(hierarchy.SetInEndpoint(e, []string{pName}), mechanism.DNSHostname) - // Copy the nested address field as slice fields are shared by the - // iteration variable and the original slice. - retEndpoints[i].Addresses = append([]resolver.Address{}, e.Addresses...) + clusterUpdate := config.Cluster + lbconfig := &clusterimpl.LBConfig{ + Cluster: clusterUpdate.ClusterName, + ChildPolicy: xdsLBPolicy, + } + endpoints := config.EndpointConfig.DNSEndpoints.Endpoints + if len(endpoints) == 0 { + return pName, lbconfig, nil } - return pName, &clusterimpl.LBConfig{ - Cluster: mechanism.Cluster, - TelemetryLabels: mechanism.TelemetryLabels, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}, - MaxConcurrentRequests: mechanism.MaxConcurrentRequests, - LoadReportingServer: mechanism.LoadReportingServer, - }, retEndpoints + var retEndpoint resolver.Endpoint + for _, e := range endpoints { + // LOGICAL_DNS requires all resolved addresses to be grouped into a + // single logical endpoint. We iterate over the input endpoints and + // aggregate their addresses into a new endpoint variable. + retEndpoint.Addresses = append(retEndpoint.Addresses, e.Addresses...) 
+ } + // Even though localities are not a thing for the LOGICAL_DNS cluster and + // its endpoint(s), we add an empty locality attribute here to ensure that + // LB policies that rely on locality information (like weighted_target) + // continue to work. + localityStr := xdsinternal.LocalityString(clients.Locality{}) + retEndpoint = xdsresource.SetHostname(hierarchy.SetInEndpoint(retEndpoint, []string{pName, localityStr}), clusterUpdate.DNSHostName) + // Set the locality weight to 1. This is required because the child policy + // like weighted_target which relies on locality weights to distribute + // traffic. These policies may drop traffic if the weight is 0. + retEndpoint = wrrlocality.SetAddrInfo(retEndpoint, wrrlocality.AddrInfo{LocalityWeight: 1}) + return pName, lbconfig, []resolver.Endpoint{retEndpoint} } // buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for @@ -171,14 +198,8 @@ func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoin // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Endpoint, error) { - drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) - for _, d := range edsResp.Drops { - drops = append(drops, clusterimpl.DropConfig{ - Category: d.Category, - RequestsPerMillion: d.Numerator * million / d.Denominator, - }) - } +func buildClusterImplConfigForEDS(g *nameGenerator, config *xdsresource.ClusterConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Endpoint, error) { + edsUpdate := config.EndpointConfig.EDSUpdate // Localities of length 0 is triggered by an NACK or resource-not-found // error 
before update, or an empty localities list in an update. In either @@ -188,15 +209,15 @@ func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.Endpoint // should report a result that is a single priority with no endpoints." - // A37 priorities := [][]xdsresource.Locality{{}} - if len(edsResp.Localities) != 0 { - priorities = groupLocalitiesByPriority(edsResp.Localities) + if len(edsUpdate.Localities) != 0 { + priorities = groupLocalitiesByPriority(edsUpdate.Localities) } retNames := g.generate(priorities) retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) var retEndpoints []resolver.Endpoint for i, pName := range retNames { priorityLocalities := priorities[i] - cfg, endpoints, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) + cfg, endpoints, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, *config.Cluster, xdsLBPolicy) if err != nil { return nil, nil, nil, err } @@ -234,7 +255,7 @@ func groupLocalitiesByPriority(localities []xdsresource.Locality) [][]xdsresourc // priority), and generates a cluster impl policy config, and a list of // addresses with their path hierarchy set to [priority-name, locality-name], so // priority and the xDS LB Policy know which child policy each address is for. -func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Endpoint, error) { +func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, clusterUpdate xdsresource.ClusterUpdate, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Endpoint, error) { var retEndpoints []resolver.Endpoint // Compute the sum of locality weights to normalize locality weights. 
The @@ -296,13 +317,8 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority } } return &clusterimpl.LBConfig{ - Cluster: mechanism.Cluster, - EDSServiceName: mechanism.EDSServiceName, - LoadReportingServer: mechanism.LoadReportingServer, - MaxConcurrentRequests: mechanism.MaxConcurrentRequests, - TelemetryLabels: mechanism.TelemetryLabels, - DropCategories: drops, - ChildPolicy: xdsLBPolicy, + Cluster: clusterUpdate.ClusterName, + ChildPolicy: xdsLBPolicy, }, retEndpoints, nil } diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/configbuilder_childname.go b/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/configbuilder_childname.go similarity index 62% rename from vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/configbuilder_childname.go rename to vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/configbuilder_childname.go index 296ed740e4..2b0fedd15e 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/configbuilder_childname.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/cdsbalancer/configbuilder_childname.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package clusterresolver +package cdsbalancer import ( "fmt" @@ -31,9 +31,10 @@ import ( // struct keeps state between generate() calls, and a later generate() might // return names returned by the previous call. 
type nameGenerator struct { - existingNames map[clients.Locality]string - prefix uint64 - nextID uint64 + prevLocalitiesToChildNames map[clients.Locality]string // locality to child name mapping generated for the previous update + prevChildNames []string // prioritized list of child names generated for the previous update + prefix uint64 + nextID uint64 } func newNameGenerator(prefix uint64) *nameGenerator { @@ -53,36 +54,58 @@ func newNameGenerator(prefix uint64) *nameGenerator { // - update 3: [[L1, L2], [L3]] --> ["0", "2"] (Two priorities were merged) // - update 4: [[L1], [L4]] --> ["0", "3",] (A priority was split, and a new priority was added) func (ng *nameGenerator) generate(priorities [][]xdsresource.Locality) []string { - var ret []string + ret := make([]string, len(priorities)) usedNames := make(map[string]bool) newNames := make(map[clients.Locality]string) - for _, priority := range priorities { - var nameFound string + + // Pass 1: Same priority index match. + for i, priority := range priorities { + if i >= len(ng.prevChildNames) { + continue + } + targetName := ng.prevChildNames[i] + for _, locality := range priority { + if name, ok := ng.prevLocalitiesToChildNames[locality.ID]; ok && name == targetName { + ret[i] = targetName + usedNames[targetName] = true + break + } + } + } + + // Pass 2: Greedy reuse. + for i, priority := range priorities { + if ret[i] != "" { + continue + } for _, locality := range priority { - if name, ok := ng.existingNames[locality.ID]; ok { + if name, ok := ng.prevLocalitiesToChildNames[locality.ID]; ok { if !usedNames[name] { - nameFound = name - // Found a name to use. No need to process the remaining - // localities. + ret[i] = name + usedNames[name] = true break } } } + } - if nameFound == "" { - // No appropriate used name is found. Make a new name. - nameFound = fmt.Sprintf("priority-%d-%d", ng.prefix, ng.nextID) + // Pass 3: New name. 
+ for i, name := range ret { + if name == "" { + newID := fmt.Sprintf("priority-%d-%d", ng.prefix, ng.nextID) ng.nextID++ + ret[i] = newID + usedNames[newID] = true } + } - ret = append(ret, nameFound) - // All localities in this priority share the same name. Add them all to - // the new map. + // Update state. + for i, priority := range priorities { for _, l := range priority { - newNames[l.ID] = nameFound + newNames[l.ID] = ret[i] } - usedNames[nameFound] = true } - ng.existingNames = newNames + ng.prevLocalitiesToChildNames = newNames + ng.prevChildNames = ret return ret } diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/clusterimpl.go index 4818ddf0c1..371d52e97f 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/clusterimpl.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/clusterimpl.go @@ -122,29 +122,42 @@ type clusterImplBalancer struct { telemetryLabels map[string]string // Telemetry labels to set on picks, from LB config. } -// handleDropAndRequestCountLocked compares drop and request counter in newConfig with -// the one currently used by picker, and is protected by b.mu. It returns a boolean -// indicating if a new picker needs to be generated. -func (b *clusterImplBalancer) handleDropAndRequestCountLocked(newConfig *LBConfig) bool { +// handleDropAndRequestCountLocked compares drop and request counter in new +// update with the one currently used by picker, and is protected by b.mu. It +// returns a boolean indicating if a new picker needs to be generated. 
+func (b *clusterImplBalancer) handleDropAndRequestCountLocked(clusterConfig xdsresource.ClusterConfig) bool { + clusterUpdate := clusterConfig.Cluster var updatePicker bool - if !slices.Equal(b.dropCategories, newConfig.DropCategories) { - b.dropCategories = newConfig.DropCategories - b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) - for _, c := range newConfig.DropCategories { - b.drops = append(b.drops, newDropper(c)) + + var newDrops []DropConfig + if clusterUpdate.ClusterType == xdsresource.ClusterTypeEDS { + edsUpdate := clusterConfig.EndpointConfig.EDSUpdate + newDrops = make([]DropConfig, 0, len(edsUpdate.Drops)) + for _, d := range edsUpdate.Drops { + newDrops = append(newDrops, DropConfig{ + Category: d.Category, + RequestsPerMillion: d.Numerator * million / d.Denominator, + }) + } + if !slices.Equal(b.dropCategories, newDrops) { + b.dropCategories = newDrops + b.drops = make([]*dropper, 0, len(newDrops)) + for _, c := range newDrops { + b.drops = append(b.drops, newDropper(c)) + } + updatePicker = true } - updatePicker = true } - if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { - b.requestCounterCluster = newConfig.Cluster - b.requestCounterService = newConfig.EDSServiceName - b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) + if b.requestCounterCluster != clusterUpdate.ClusterName || b.requestCounterService != clusterUpdate.EDSServiceName { + b.requestCounterCluster = clusterUpdate.ClusterName + b.requestCounterService = clusterUpdate.EDSServiceName + b.requestCounter = xdsclient.GetClusterRequestsCounter(clusterUpdate.ClusterName, clusterUpdate.EDSServiceName) updatePicker = true } var newRequestCountMax uint32 = 1024 - if newConfig.MaxConcurrentRequests != nil { - newRequestCountMax = *newConfig.MaxConcurrentRequests + if clusterUpdate.MaxRequests != nil { + newRequestCountMax = *clusterUpdate.MaxRequests } if b.requestCountMax != 
newRequestCountMax { b.requestCountMax = newRequestCountMax @@ -168,20 +181,20 @@ func (b *clusterImplBalancer) newPickerLocked() *picker { // updateLoadStore checks the config for load store, and decides whether it // needs to restart the load reporting stream. -func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { +func (b *clusterImplBalancer) updateLoadStore(clusterUpdate *xdsresource.ClusterUpdate) error { var updateLoadClusterAndService bool // ClusterName is different, restart. ClusterName is from ClusterName and // EDSServiceName. clusterName := b.getClusterName() - if clusterName != newConfig.Cluster { + if clusterName != clusterUpdate.ClusterName { updateLoadClusterAndService = true - b.setClusterName(newConfig.Cluster) - clusterName = newConfig.Cluster + b.setClusterName(clusterUpdate.ClusterName) + clusterName = clusterUpdate.ClusterName } - if b.edsServiceName != newConfig.EDSServiceName { + if b.edsServiceName != clusterUpdate.EDSServiceName { updateLoadClusterAndService = true - b.edsServiceName = newConfig.EDSServiceName + b.edsServiceName = clusterUpdate.EDSServiceName } if updateLoadClusterAndService { // This updates the clusterName and serviceName that will be reported @@ -202,21 +215,21 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { // Check if it's necessary to restart load report. if b.lrsServer == nil { - if newConfig.LoadReportingServer != nil { + if clusterUpdate.LRSServerConfig != nil { // Old is nil, new is not nil, start new LRS. - b.lrsServer = newConfig.LoadReportingServer + b.lrsServer = clusterUpdate.LRSServerConfig startNewLoadReport = true } // Old is nil, new is nil, do nothing. - } else if newConfig.LoadReportingServer == nil { + } else if clusterUpdate.LRSServerConfig == nil { // Old is not nil, new is nil, stop old, don't start new. 
- b.lrsServer = newConfig.LoadReportingServer + b.lrsServer = nil stopOldLoadReport = true } else { // Old is not nil, new is not nil, compare string values, if // different, stop old and start new. - if !b.lrsServer.Equal(newConfig.LoadReportingServer) { - b.lrsServer = newConfig.LoadReportingServer + if !b.lrsServer.Equal(clusterUpdate.LRSServerConfig) { + b.lrsServer = clusterUpdate.LRSServerConfig stopOldLoadReport = true startNewLoadReport = true } @@ -276,11 +289,19 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) b.xdsClient = c } + xdsConfig := xdsresource.XDSConfigFromResolverState(s.ResolverState) + if xdsConfig == nil { + b.logger.Warningf("Received balancer config with no xDS config") + return balancer.ErrBadResolverState + } + clusterCfg := xdsConfig.Clusters[newConfig.Cluster] + clusterUpdate := clusterCfg.Config.Cluster + // Update load reporting config. This needs to be done before updating the // child policy because we need the loadStore from the updated client to be // passed to the ccWrapper, so that the next picker from the child policy // will pick up the new loadStore. - if err := b.updateLoadStore(newConfig); err != nil { + if err := b.updateLoadStore(clusterUpdate); err != nil { return err } @@ -301,7 +322,7 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) }) b.mu.Lock() - b.telemetryLabels = newConfig.TelemetryLabels + b.telemetryLabels = clusterUpdate.TelemetryLabels // We want to send a picker update to the parent if one of the two // conditions are met: // - drop/request config has changed *and* there is already a picker from @@ -309,7 +330,7 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) // - there is a pending picker update from the child (and this covers the // case where the drop/request config has not changed, but the child sent // a picker update while we were still processing config from our parent). 
- if (b.handleDropAndRequestCountLocked(newConfig) && b.childState.Picker != nil) || b.pendingPickerUpdates { + if (b.handleDropAndRequestCountLocked(clusterCfg.Config) && b.childState.Picker != nil) || b.pendingPickerUpdates { b.pendingPickerUpdates = false b.ClientConn.UpdateState(balancer.State{ ConnectivityState: b.childState.ConnectivityState, diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/config.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/config.go index 34f777a208..47be86e79c 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/config.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterimpl/config.go @@ -22,7 +22,6 @@ import ( "encoding/json" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/serviceconfig" ) @@ -36,16 +35,8 @@ type DropConfig struct { type LBConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` - Cluster string `json:"cluster,omitempty"` - EDSServiceName string `json:"edsServiceName,omitempty"` - // LoadReportingServer is the LRS server to send load reports to. If not - // present, load reporting will be disabled. - LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` - MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` - DropCategories []DropConfig `json:"dropCategories,omitempty"` - // TelemetryLabels are the telemetry Labels associated with this cluster. 
- TelemetryLabels map[string]string `json:"telemetryLabels,omitempty"` - ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` + Cluster string `json:"cluster,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } func parseConfig(c json.RawMessage) (*LBConfig, error) { diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clustermanager/clustermanager.go index 05e10e579d..c5300a62fc 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clustermanager/clustermanager.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/clustermanager/clustermanager.go @@ -141,7 +141,7 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er }, BalancerConfig: lbCfg, }); err != nil { - retErr = fmt.Errorf("failed to push new configuration %v to child %q", childCfg.ChildPolicy.Config, childName) + retErr = fmt.Errorf("failed to push new configuration %v to child %q: %v", childCfg.ChildPolicy.Config, childName, err) b.setErrorPickerForChild(childName, retErr) } diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/clusterresolver.go deleted file mode 100644 index 1990704682..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/clusterresolver.go +++ /dev/null @@ -1,410 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package clusterresolver contains the implementation of the -// cluster_resolver_experimental LB policy which resolves endpoint addresses -// using a list of one or more discovery mechanisms. -package clusterresolver - -import ( - "encoding/json" - "errors" - "fmt" - - "google.golang.org/grpc/attributes" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancer/nop" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/xds/balancer/outlierdetection" - "google.golang.org/grpc/internal/xds/balancer/priority" - "google.golang.org/grpc/internal/xds/xdsclient" - "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// Name is the name of the cluster_resolver balancer. -const Name = "cluster_resolver_experimental" - -var ( - errBalancerClosed = errors.New("cdsBalancer is closed") - newChildBalancer = func(bb balancer.Builder, cc balancer.ClientConn, o balancer.BuildOptions) balancer.Balancer { - return bb.Build(cc, o) - } -) - -func init() { - balancer.Register(bb{}) -} - -type bb struct{} - -// Build helps implement the balancer.Builder interface. 
-func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - priorityBuilder := balancer.Get(priority.Name) - if priorityBuilder == nil { - logger.Errorf("%q LB policy is needed but not registered", priority.Name) - return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", priority.Name)) - } - priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) - if !ok { - logger.Errorf("%q LB policy does not implement a config parser", priority.Name) - return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", priority.Name)) - } - - b := &clusterResolverBalancer{ - bOpts: opts, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - - priorityBuilder: priorityBuilder, - priorityConfigParser: priorityConfigParser, - } - b.logger = prefixLogger(b) - b.logger.Infof("Created") - - b.resourceWatcher = newResourceResolver(b, b.logger) - b.cc = &ccWrapper{ - ClientConn: cc, - b: b, - resourceWatcher: b.resourceWatcher, - } - - go b.run() - return b -} - -func (bb) Name() string { - return Name -} - -func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - odBuilder := balancer.Get(outlierdetection.Name) - if odBuilder == nil { - // Shouldn't happen, registered through imported Outlier Detection, - // defensive programming. - return nil, fmt.Errorf("%q LB policy is needed but not registered", outlierdetection.Name) - } - odParser, ok := odBuilder.(balancer.ConfigParser) - if !ok { - // Shouldn't happen, imported Outlier Detection builder has this method. 
- return nil, fmt.Errorf("%q LB policy does not implement a config parser", outlierdetection.Name) - } - - var cfg *LBConfig - if err := json.Unmarshal(j, &cfg); err != nil { - return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(j), err) - } - - for i, dm := range cfg.DiscoveryMechanisms { - lbCfg, err := odParser.ParseConfig(dm.OutlierDetection) - if err != nil { - return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err) - } - odCfg, ok := lbCfg.(*outlierdetection.LBConfig) - if !ok { - // Shouldn't happen, Parser built at build time with Outlier Detection - // builder pulled from gRPC LB Registry. - return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg) - } - cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg - } - if err := json.Unmarshal(cfg.XDSLBPolicy, &cfg.xdsLBPolicy); err != nil { - // This will never occur, valid configuration is emitted from the xDS - // Client. Validity is already checked in the xDS Client, however, this - // double validation is present because Unmarshalling and Validating are - // coupled into one json.Unmarshal operation. We will switch this in - // the future to two separate operations. - return nil, fmt.Errorf("error unmarshalling xDS LB Policy: %v", err) - } - return cfg, nil -} - -// ccUpdate wraps a clientConn update received from gRPC. -type ccUpdate struct { - state balancer.ClientConnState - err error -} - -type exitIdle struct{} - -// clusterResolverBalancer resolves endpoint addresses using a list of one or -// more discovery mechanisms. -type clusterResolverBalancer struct { - cc balancer.ClientConn - bOpts balancer.BuildOptions - updateCh *buffer.Unbounded // Channel for updates from gRPC. 
- resourceWatcher *resourceResolver - logger *grpclog.PrefixLogger - closed *grpcsync.Event - done *grpcsync.Event - - priorityBuilder balancer.Builder - priorityConfigParser balancer.ConfigParser - - config *LBConfig - configRaw *serviceconfig.ParseResult - xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. - attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. - - child balancer.Balancer - priorities []priorityConfig - watchUpdateReceived bool -} - -// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. -// -// A good update results in creation of endpoint resolvers for the configured -// discovery mechanisms. An update with an error results in cancellation of any -// existing endpoint resolution and propagation of the same to the child policy. -func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { - if err := update.err; err != nil { - b.handleErrorFromUpdate(err, true) - return - } - - if b.logger.V(2) { - b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) - } - - cfg, _ := update.state.BalancerConfig.(*LBConfig) - if cfg == nil { - b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) - return - } - - b.config = cfg - b.configRaw = update.state.ResolverState.ServiceConfig - b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) - - // The child policy is created only after all configured discovery - // mechanisms have been successfully returned endpoints. If that is not the - // case, we return early. - if !b.watchUpdateReceived { - return - } - b.updateChildConfig() -} - -// handleResourceUpdate handles a resource update or error from the resource -// resolver by propagating the same to the child LB policy. 
-func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { - b.watchUpdateReceived = true - b.priorities = update.priorities - - // An update from the resource resolver contains resolved endpoint addresses - // for all configured discovery mechanisms ordered by priority. This is used - // to generate configuration for the priority LB policy. - b.updateChildConfig() - - if update.onDone != nil { - update.onDone() - } -} - -// updateChildConfig builds child policy configuration using endpoint addresses -// returned by the resource resolver and child policy configuration provided by -// parent LB policy. -// -// A child policy is created if one doesn't already exist. The newly built -// configuration is then pushed to the child policy. -func (b *clusterResolverBalancer) updateChildConfig() { - if b.child == nil { - b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) - } - - childCfgBytes, endpoints, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) - if err != nil { - b.logger.Warningf("Failed to build child policy config: %v", err) - return - } - childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) - if err != nil { - b.logger.Warningf("Failed to parse child policy config. This should never happen because the config was generated: %v", err) - return - } - if b.logger.V(2) { - b.logger.Infof("Built child policy config: %s", pretty.ToJSON(childCfg)) - } - - for i := range endpoints { - for j := range endpoints[i].Addresses { - addr := endpoints[i].Addresses[j] - addr.BalancerAttributes = endpoints[i].Attributes - // BalancerAttributes need to be present in endpoint addresses. This - // temporary workaround is required to make load reporting work - // with the old pickfirst policy which creates SubConns with multiple - // addresses. Since the addresses can be from different localities, - // an Address.BalancerAttribute is used to identify the locality of the - // address used by the transport. 
This workaround can be removed once - // the old pickfirst is removed. - // See https://github.com/grpc/grpc-go/issues/7339 - endpoints[i].Addresses[j] = addr - } - } - if err := b.child.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Endpoints: endpoints, - ServiceConfig: b.configRaw, - Attributes: b.attrsWithClient, - }, - BalancerConfig: childCfg, - }); err != nil { - b.logger.Warningf("Failed to push config to child policy: %v", err) - } -} - -// handleErrorFromUpdate handles errors from the parent LB policy and endpoint -// resolvers. fromParent is true if error is from the parent LB policy. In both -// cases, the error is propagated to the child policy, if one exists. -func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { - b.logger.Warningf("Received error: %v", err) - - // A resource-not-found error from the parent LB policy means that the LDS - // or CDS resource was removed. This should result in endpoint resolvers - // being stopped here. - // - // A resource-not-found error from the EDS endpoint resolver means that the - // EDS resource was removed. No action needs to be taken for this, and we - // should continue watching the same EDS resource. - if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { - b.resourceWatcher.stop(false) - } - - if b.child != nil { - b.child.ResolverError(err) - return - } - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(err), - }) -} - -// run is a long-running goroutine that handles updates from gRPC and endpoint -// resolvers. The methods handling the individual updates simply push them onto -// a channel which is read and acted upon from here. 
-func (b *clusterResolverBalancer) run() { - for { - select { - case u, ok := <-b.updateCh.Get(): - if !ok { - return - } - b.updateCh.Load() - switch update := u.(type) { - case *ccUpdate: - b.handleClientConnUpdate(update) - case exitIdle: - if b.child == nil { - // This is not necessarily an error. The EDS/DNS watch may - // not have returned a list of endpoints yet, so the child - // may not be built. - if b.logger.V(2) { - b.logger.Infof("xds: received ExitIdle with no child balancer") - } - break - } - b.child.ExitIdle() - } - case u := <-b.resourceWatcher.updateChannel: - b.handleResourceUpdate(u) - - // Close results in stopping the endpoint resolvers and closing the - // underlying child policy and is the only way to exit this goroutine. - case <-b.closed.Done(): - b.resourceWatcher.stop(true) - - if b.child != nil { - b.child.Close() - b.child = nil - } - b.updateCh.Close() - // This is the *ONLY* point of return from this function. - b.logger.Infof("Shutdown") - b.done.Fire() - return - } - } -} - -// Following are methods to implement the balancer interface. - -func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if b.closed.HasFired() { - b.logger.Warningf("Received update from gRPC {%+v} after close", state) - return errBalancerClosed - } - - if b.xdsClient == nil { - c := xdsclient.FromResolverState(state.ResolverState) - if c == nil { - return balancer.ErrBadResolverState - } - b.xdsClient = c - b.attrsWithClient = state.ResolverState.Attributes - } - - b.updateCh.Put(&ccUpdate{state: state}) - return nil -} - -// ResolverError handles errors reported by the xdsResolver. -func (b *clusterResolverBalancer) ResolverError(err error) { - if b.closed.HasFired() { - b.logger.Warningf("Received resolver error {%v} after close", err) - return - } - b.updateCh.Put(&ccUpdate{err: err}) -} - -// UpdateSubConnState handles subConn updates from gRPC. 
-func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) -} - -// Close closes the cdsBalancer and the underlying child balancer. -func (b *clusterResolverBalancer) Close() { - b.closed.Fire() - <-b.done.Done() -} - -func (b *clusterResolverBalancer) ExitIdle() { - b.updateCh.Put(exitIdle{}) -} - -// ccWrapper overrides ResolveNow(), so that re-resolution from the child -// policies will trigger the DNS resolver in cluster_resolver balancer. It -// also intercepts NewSubConn calls in case children don't set the -// StateListener, to allow redirection to happen via this cluster_resolver -// balancer. -type ccWrapper struct { - balancer.ClientConn - b *clusterResolverBalancer - resourceWatcher *resourceResolver -} - -func (c *ccWrapper) ResolveNow(resolver.ResolveNowOptions) { - c.resourceWatcher.resolveNow() -} diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/config.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/config.go deleted file mode 100644 index f3b4c6bf61..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/config.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package clusterresolver - -import ( - "bytes" - "encoding/json" - "fmt" - - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/internal/xds/balancer/outlierdetection" - "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/serviceconfig" -) - -// DiscoveryMechanismType is the type of discovery mechanism. -type DiscoveryMechanismType int - -const ( - // DiscoveryMechanismTypeEDS is eds. - DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` - // DiscoveryMechanismTypeLogicalDNS is DNS. - DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` -) - -// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. -// -// This is necessary to handle enum (as strings) from JSON. -// -// Note that this needs to be defined on the type not pointer, otherwise the -// variables of this type will marshal to int not string. -func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { - buffer := bytes.NewBufferString(`"`) - switch t { - case DiscoveryMechanismTypeEDS: - buffer.WriteString("EDS") - case DiscoveryMechanismTypeLogicalDNS: - buffer.WriteString("LOGICAL_DNS") - } - buffer.WriteString(`"`) - return buffer.Bytes(), nil -} - -// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. -func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { - var s string - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - switch s { - case "EDS": - *t = DiscoveryMechanismTypeEDS - case "LOGICAL_DNS": - *t = DiscoveryMechanismTypeLogicalDNS - default: - return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) - } - return nil -} - -// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. -// -// For DNS, the ClientConn target will be used for name resolution. -// -// For EDS, if EDSServiceName is not empty, it will be used for watching. If -// EDSServiceName is empty, Cluster will be used. 
-type DiscoveryMechanism struct { - // Cluster is the cluster name. - Cluster string `json:"cluster,omitempty"` - // LoadReportingServer is the LRS server to send load reports to. If not - // present, load reporting will be disabled. - LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` - // MaxConcurrentRequests is the maximum number of outstanding requests can - // be made to the upstream cluster. Default is 1024. - MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` - // Type is the discovery mechanism type. - Type DiscoveryMechanismType `json:"type,omitempty"` - // EDSServiceName is the EDS service name, as returned in CDS. May be unset - // if not specified in CDS. For type EDS only. - // - // This is used for EDS watch if set. If unset, Cluster is used for EDS - // watch. - EDSServiceName string `json:"edsServiceName,omitempty"` - // DNSHostname is the DNS name to resolve in "host:port" form. For type - // LOGICAL_DNS only. - DNSHostname string `json:"dnsHostname,omitempty"` - // OutlierDetection is the Outlier Detection LB configuration for this - // priority. - OutlierDetection json.RawMessage `json:"outlierDetection,omitempty"` - // TelemetryLabels are the telemetry labels associated with this cluster. - TelemetryLabels map[string]string `json:"telemetryLabels,omitempty"` - outlierDetection outlierdetection.LBConfig -} - -// Equal returns whether the DiscoveryMechanism is the same with the parameter. 
-func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { - od := &dm.outlierDetection - switch { - case dm.Cluster != b.Cluster: - return false - case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): - return false - case dm.Type != b.Type: - return false - case dm.EDSServiceName != b.EDSServiceName: - return false - case dm.DNSHostname != b.DNSHostname: - return false - case !od.EqualIgnoringChildPolicy(&b.outlierDetection): - return false - } - - if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { - return true - } - if (dm.LoadReportingServer != nil) != (b.LoadReportingServer != nil) { - return false - } - return dm.LoadReportingServer.String() == b.LoadReportingServer.String() -} - -func equalUint32P(a, b *uint32) bool { - if a == nil && b == nil { - return true - } - if a == nil || b == nil { - return false - } - return *a == *b -} - -// LBConfig is the config for cluster resolver balancer. -type LBConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - // DiscoveryMechanisms is an ordered list of discovery mechanisms. - // - // Must have at least one element. Results from each discovery mechanism are - // concatenated together in successive priorities. - DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` - - // XDSLBPolicy specifies the policy for locality picking and endpoint picking. - XDSLBPolicy json.RawMessage `json:"xdsLbPolicy,omitempty"` - xdsLBPolicy internalserviceconfig.BalancerConfig -} diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/logging.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/logging.go deleted file mode 100644 index 728f1f709c..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/logging.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package clusterresolver - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" - internalgrpclog "google.golang.org/grpc/internal/grpclog" -) - -const prefix = "[xds-cluster-resolver-lb %p] " - -var logger = grpclog.Component("xds") - -func prefixLogger(p *clusterResolverBalancer) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) -} diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver.go deleted file mode 100644 index 90e950c733..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver.go +++ /dev/null @@ -1,322 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package clusterresolver - -import ( - "context" - "sync" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" - "google.golang.org/grpc/resolver" -) - -// resourceUpdate is a combined update from all the resources, in the order of -// priority. For example, it can be {EDS, EDS, DNS}. -type resourceUpdate struct { - // A discovery mechanism would return an empty update when it runs into - // errors, and this would result in the priority LB policy reporting - // TRANSIENT_FAILURE (if there was a single discovery mechanism), or would - // fallback to the next highest priority that is available. - priorities []priorityConfig - // To be invoked once the update is completely processed, or is dropped in - // favor of a newer update. - onDone func() -} - -// topLevelResolver is used by concrete endpointsResolver implementations for -// reporting updates and errors. The `resourceResolver` type implements this -// interface and takes appropriate actions upon receipt of updates and errors -// from underlying concrete resolvers. -type topLevelResolver interface { - // onUpdate is called when a new update is received from the underlying - // endpointsResolver implementation. The onDone callback is to be invoked - // once the update is completely processed, or is dropped in favor of a - // newer update. - onUpdate(onDone func()) -} - -// endpointsResolver wraps the functionality to resolve a given resource name to -// a set of endpoints. The mechanism used by concrete implementations depend on -// the supported discovery mechanism type. -type endpointsResolver interface { - // lastUpdate returns endpoint results from the most recent resolution. - // - // The type of the first return result is dependent on the resolver - // implementation. 
- // - // The second return result indicates whether the resolver was able to - // successfully resolve the resource name to endpoints. If set to false, the - // first return result is invalid and must not be used. - lastUpdate() (any, bool) - - // resolverNow triggers re-resolution of the resource. - resolveNow() - - // stop stops resolution of the resource. Implementations must not invoke - // any methods on the topLevelResolver interface once `stop()` returns. - stop() -} - -// discoveryMechanismKey is {type+resource_name}, it's used as the map key, so -// that the same resource resolver can be reused (e.g. when there are two -// mechanisms, both for the same EDS resource, but has different circuit -// breaking config). -type discoveryMechanismKey struct { - typ DiscoveryMechanismType - name string -} - -// discoveryMechanismAndResolver is needed to keep the resolver and the -// discovery mechanism together, because resolvers can be shared. And we need -// the mechanism for fields like circuit breaking, LRS etc when generating the -// balancer config. -type discoveryMechanismAndResolver struct { - dm DiscoveryMechanism - r endpointsResolver - - childNameGen *nameGenerator -} - -type resourceResolver struct { - parent *clusterResolverBalancer - logger *grpclog.PrefixLogger - updateChannel chan *resourceUpdate - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - - // mu protects the slice and map, and content of the resolvers in the slice. - mu sync.Mutex - mechanisms []DiscoveryMechanism - children []discoveryMechanismAndResolver - // childrenMap's value only needs the resolver implementation (type - // discoveryMechanism) and the childNameGen. The other two fields are not - // used. - // - // TODO(cleanup): maybe we can make a new type with just the necessary - // fields, and use it here instead. 
- childrenMap map[discoveryMechanismKey]discoveryMechanismAndResolver - // Each new discovery mechanism needs a child name generator to reuse child - // policy names. But to make sure the names across discover mechanism - // doesn't conflict, we need a seq ID. This ID is incremented for each new - // discover mechanism. - childNameGeneratorSeqID uint64 -} - -func newResourceResolver(parent *clusterResolverBalancer, logger *grpclog.PrefixLogger) *resourceResolver { - rr := &resourceResolver{ - parent: parent, - logger: logger, - updateChannel: make(chan *resourceUpdate, 1), - childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), - } - ctx, cancel := context.WithCancel(context.Background()) - rr.serializer = grpcsync.NewCallbackSerializer(ctx) - rr.serializerCancel = cancel - return rr -} - -func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { - if len(a) != len(b) { - return false - } - for i, aa := range a { - bb := b[i] - if !aa.Equal(bb) { - return false - } - } - return true -} - -func discoveryMechanismToKey(dm DiscoveryMechanism) discoveryMechanismKey { - switch dm.Type { - case DiscoveryMechanismTypeEDS: - nameToWatch := dm.EDSServiceName - if nameToWatch == "" { - nameToWatch = dm.Cluster - } - return discoveryMechanismKey{typ: dm.Type, name: nameToWatch} - case DiscoveryMechanismTypeLogicalDNS: - return discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} - default: - return discoveryMechanismKey{} - } -} - -func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { - rr.mu.Lock() - defer rr.mu.Unlock() - if equalDiscoveryMechanisms(rr.mechanisms, mechanisms) { - return - } - rr.mechanisms = mechanisms - rr.children = make([]discoveryMechanismAndResolver, len(mechanisms)) - newDMs := make(map[discoveryMechanismKey]bool) - - // Start one watch for each new discover mechanism {type+resource_name}. 
- for i, dm := range mechanisms { - dmKey := discoveryMechanismToKey(dm) - newDMs[dmKey] = true - dmAndResolver, ok := rr.childrenMap[dmKey] - if ok { - // If this is not new, keep the fields (especially childNameGen), - // and only update the DiscoveryMechanism. - // - // Note that the same dmKey doesn't mean the same - // DiscoveryMechanism. There are fields (e.g. - // MaxConcurrentRequests) in DiscoveryMechanism that are not copied - // to dmKey, we need to keep those updated. - dmAndResolver.dm = dm - rr.children[i] = dmAndResolver - continue - } - - // Create resolver for a newly seen resource. - var resolver endpointsResolver - switch dm.Type { - case DiscoveryMechanismTypeEDS: - resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr, rr.logger) - case DiscoveryMechanismTypeLogicalDNS: - resolver = newDNSResolver(dmKey.name, rr, rr.logger) - } - dmAndResolver = discoveryMechanismAndResolver{ - dm: dm, - r: resolver, - childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), - } - rr.childrenMap[dmKey] = dmAndResolver - rr.children[i] = dmAndResolver - rr.childNameGeneratorSeqID++ - } - - // Stop the resources that were removed. - for dm, r := range rr.childrenMap { - if !newDMs[dm] { - delete(rr.childrenMap, dm) - go r.r.stop() - } - } - // Regenerate even if there's no change in discovery mechanism, in case - // priority order changed. - rr.generateLocked(func() {}) -} - -// resolveNow is typically called to trigger re-resolve of DNS. The EDS -// resolveNow() is a noop. -func (rr *resourceResolver) resolveNow() { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, r := range rr.childrenMap { - r.r.resolveNow() - } -} - -func (rr *resourceResolver) stop(closing bool) { - rr.mu.Lock() - - // Save the previous childrenMap to stop the children outside the mutex, - // and reinitialize the map. We only need to reinitialize to allow for the - // policy to be reused if the resource comes back. 
In practice, this does - // not happen as the parent LB policy will also be closed, causing this to - // be removed entirely, but a future use case might want to reuse the - // policy instead. - cm := rr.childrenMap - rr.childrenMap = make(map[discoveryMechanismKey]discoveryMechanismAndResolver) - rr.mechanisms = nil - rr.children = nil - - rr.mu.Unlock() - - for _, r := range cm { - r.r.stop() - } - - if closing { - rr.serializerCancel() - <-rr.serializer.Done() - } - - // stop() is called when the LB policy is closed or when the underlying - // cluster resource is removed by the management server. In the latter case, - // an empty config update needs to be pushed to the child policy to ensure - // that a picker that fails RPCs is sent up to the channel. - // - // Resource resolver implementations are expected to not send any updates - // after they are stopped. Therefore, we don't have to worry about another - // write to this channel happening at the same time as this one. - select { - case ru := <-rr.updateChannel: - if ru.onDone != nil { - ru.onDone() - } - default: - } - rr.updateChannel <- &resourceUpdate{} -} - -// generateLocked collects updates from all resolvers. It pushes the combined -// result on the update channel if all child resolvers have received at least -// one update. Otherwise it returns early. -// -// The onDone callback is invoked inline if not all child resolvers have -// received at least one update. If all child resolvers have received at least -// one update, onDone is invoked when the combined update is processed by the -// clusterresolver LB policy. -// -// Caller must hold rr.mu. -func (rr *resourceResolver) generateLocked(onDone func()) { - var ret []priorityConfig - for _, rDM := range rr.children { - u, ok := rDM.r.lastUpdate() - if !ok { - // Don't send updates to parent until all resolvers have update to - // send. 
- onDone() - return - } - switch uu := u.(type) { - case xdsresource.EndpointsUpdate: - ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) - case []resolver.Endpoint: - ret = append(ret, priorityConfig{mechanism: rDM.dm, endpoints: uu, childNameGen: rDM.childNameGen}) - } - } - select { - // A previously unprocessed update is dropped in favor of the new one, and - // the former's onDone callback is invoked to unblock the xDS client's - // receive path. - case ru := <-rr.updateChannel: - if ru.onDone != nil { - ru.onDone() - } - default: - } - rr.updateChannel <- &resourceUpdate{priorities: ret, onDone: onDone} -} - -func (rr *resourceResolver) onUpdate(onDone func()) { - handleUpdate := func(context.Context) { - rr.mu.Lock() - rr.generateLocked(onDone) - rr.mu.Unlock() - } - rr.serializer.ScheduleOr(handleUpdate, func() { onDone() }) -} diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_dns.go deleted file mode 100644 index ea292e712d..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_dns.go +++ /dev/null @@ -1,172 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package clusterresolver - -import ( - "fmt" - "net/url" - "sync" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -var ( - newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - // The dns resolver is registered by the grpc package. So, this call to - // resolver.Get() is never expected to return nil. - return resolver.Get("dns").Build(target, cc, opts) - } -) - -// dnsDiscoveryMechanism watches updates for the given DNS hostname. -// -// It implements resolver.ClientConn interface to work with the DNS resolver. -type dnsDiscoveryMechanism struct { - target string - topLevelResolver topLevelResolver - dnsR resolver.Resolver - logger *grpclog.PrefixLogger - - mu sync.Mutex - endpoints []resolver.Endpoint - updateReceived bool -} - -// newDNSResolver creates an endpoints resolver which uses a DNS resolver under -// the hood. -// -// An error in parsing the provided target string or an error in creating a DNS -// resolver means that we will never be able to resolve the provided target -// strings to endpoints. The topLevelResolver propagates address updates to the -// clusterresolver LB policy **only** after it receives updates from all its -// child resolvers. Therefore, an error here means that the topLevelResolver -// will never send address updates to the clusterresolver LB policy. -// -// Calling the onError() callback will ensure that this error is -// propagated to the child policy which eventually move the channel to -// transient failure. -// -// The `dnsR` field is unset if we run into errors in this function. Therefore, a -// nil check is required wherever we access that field. 
-func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism { - ret := &dnsDiscoveryMechanism{ - target: target, - topLevelResolver: topLevelResolver, - logger: logger, - } - u, err := url.Parse("dns:///" + target) - if err != nil { - if ret.logger.V(2) { - ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target) - } - ret.updateReceived = true - ret.topLevelResolver.onUpdate(func() {}) - return ret - } - - r, err := newDNS(resolver.Target{URL: *u}, ret, resolver.BuildOptions{}) - if err != nil { - if ret.logger.V(2) { - ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err) - } - ret.updateReceived = true - ret.topLevelResolver.onUpdate(func() {}) - return ret - } - ret.dnsR = r - return ret -} - -func (dr *dnsDiscoveryMechanism) lastUpdate() (any, bool) { - dr.mu.Lock() - defer dr.mu.Unlock() - - if !dr.updateReceived { - return nil, false - } - return dr.endpoints, true -} - -func (dr *dnsDiscoveryMechanism) resolveNow() { - if dr.dnsR != nil { - dr.dnsR.ResolveNow(resolver.ResolveNowOptions{}) - } -} - -// The definition of stop() mentions that implementations must not invoke any -// methods on the topLevelResolver once the call to `stop()` returns. The -// underlying dns resolver does not send any updates to the resolver.ClientConn -// interface passed to it (implemented by dnsDiscoveryMechanism in this case) -// after its `Close()` returns. Therefore, we can guarantee that no methods of -// the topLevelResolver are invoked after we return from this method. -func (dr *dnsDiscoveryMechanism) stop() { - if dr.dnsR != nil { - dr.dnsR.Close() - } -} - -// dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive -// updates from the real DNS resolver. 
- -func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { - if dr.logger.V(2) { - dr.logger.Infof("DNS discovery mechanism for resource %q reported an update: %s", dr.target, pretty.ToJSON(state)) - } - - dr.mu.Lock() - dr.endpoints = state.Endpoints - dr.updateReceived = true - dr.mu.Unlock() - - dr.topLevelResolver.onUpdate(func() {}) - return nil -} - -func (dr *dnsDiscoveryMechanism) ReportError(err error) { - if dr.logger.V(2) { - dr.logger.Infof("DNS discovery mechanism for resource %q reported error: %v", dr.target, err) - } - - dr.mu.Lock() - // If a previous good update was received, suppress the error and continue - // using the previous update. If RPCs were succeeding prior to this, they - // will continue to do so. Also suppress errors if we previously received an - // error, since there will be no downstream effects of propagating this - // error. - if dr.updateReceived { - dr.mu.Unlock() - return - } - dr.endpoints = nil - dr.updateReceived = true - dr.mu.Unlock() - - dr.topLevelResolver.onUpdate(func() {}) -} - -func (dr *dnsDiscoveryMechanism) NewAddress([]resolver.Address) { - dr.logger.Errorf("NewAddress called unexpectedly.") -} - -func (dr *dnsDiscoveryMechanism) ParseServiceConfig(string) *serviceconfig.ParseResult { - return &serviceconfig.ParseResult{Err: fmt.Errorf("service config not supported")} -} diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_eds.go b/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_eds.go deleted file mode 100644 index 6dcdb898e5..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/clusterresolver/resource_resolver_eds.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package clusterresolver - -import ( - "sync" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" -) - -type edsDiscoveryMechanism struct { - nameToWatch string - cancelWatch func() - topLevelResolver topLevelResolver - stopped *grpcsync.Event - logger *grpclog.PrefixLogger - - mu sync.Mutex - update *xdsresource.EndpointsUpdate // Nil indicates no update received so far. -} - -func (er *edsDiscoveryMechanism) lastUpdate() (any, bool) { - er.mu.Lock() - defer er.mu.Unlock() - - if er.update == nil { - return nil, false - } - return *er.update, true -} - -func (er *edsDiscoveryMechanism) resolveNow() { -} - -// The definition of stop() mentions that implementations must not invoke any -// methods on the topLevelResolver once the call to `stop()` returns. -func (er *edsDiscoveryMechanism) stop() { - // Canceling a watch with the xDS client can race with an xDS response - // received around the same time, and can result in the watch callback being - // invoked after the watch is canceled. Callers need to handle this race, - // and we fire the stopped event here to ensure that a watch callback - // invocation around the same time becomes a no-op. - er.stopped.Fire() - er.cancelWatch() -} - -// newEDSResolver returns an implementation of the endpointsResolver interface -// that uses EDS to resolve the given name to endpoints. 
-func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *edsDiscoveryMechanism { - ret := &edsDiscoveryMechanism{ - nameToWatch: nameToWatch, - topLevelResolver: topLevelResolver, - logger: logger, - stopped: grpcsync.NewEvent(), - } - ret.cancelWatch = xdsresource.WatchEndpoints(producer, nameToWatch, ret) - return ret -} - -// ResourceChanged is invoked to report an update for the resource being watched. -func (er *edsDiscoveryMechanism) ResourceChanged(update *xdsresource.EndpointsUpdate, onDone func()) { - if er.stopped.HasFired() { - onDone() - return - } - - er.mu.Lock() - er.update = update - er.mu.Unlock() - - er.topLevelResolver.onUpdate(onDone) -} - -func (er *edsDiscoveryMechanism) ResourceError(err error, onDone func()) { - if er.stopped.HasFired() { - onDone() - return - } - - if er.logger.V(2) { - er.logger.Infof("EDS discovery mechanism for resource %q reported resource error: %v", er.nameToWatch, err) - } - - // Report an empty update that would result in no priority child being - // created for this discovery mechanism. This would result in the priority - // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or - // localities) if this was the only discovery mechanism, or would result in - // the priority LB policy using a lower priority discovery mechanism when - // that becomes available. 
- er.mu.Lock() - er.update = &xdsresource.EndpointsUpdate{} - er.mu.Unlock() - - er.topLevelResolver.onUpdate(onDone) -} - -func (er *edsDiscoveryMechanism) AmbientError(err error, onDone func()) { - if er.stopped.HasFired() { - onDone() - return - } - - if er.logger.V(2) { - er.logger.Infof("EDS discovery mechanism for resource %q reported ambient error: %v", er.nameToWatch, err) - } -} diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/internal/xds/balancer/outlierdetection/balancer.go index ff5078683b..e47ac40671 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/outlierdetection/balancer.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/outlierdetection/balancer.go @@ -251,7 +251,7 @@ func (b *outlierDetectionBalancer) onIntervalConfig() { var interval time.Duration if b.timerStartTime.IsZero() { b.timerStartTime = time.Now() - for _, epInfo := range b.endpoints.Values() { + for _, epInfo := range b.endpoints.All() { epInfo.callCounter.clear() } interval = time.Duration(b.cfg.Interval) @@ -274,7 +274,7 @@ func (b *outlierDetectionBalancer) onNoopConfig() { // do the following:" // "Unset the timer start timestamp." b.timerStartTime = time.Time{} - for _, epInfo := range b.endpoints.Values() { + for _, epInfo := range b.endpoints.All() { // "Uneject all currently ejected endpoints." 
if !epInfo.latestEjectionTimestamp.IsZero() { b.unejectEndpoint(epInfo) @@ -326,7 +326,7 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt } } - for _, ep := range b.endpoints.Keys() { + for ep := range b.endpoints.All() { if _, ok := newEndpoints.Get(ep); !ok { b.endpoints.Delete(ep) } @@ -657,7 +657,7 @@ func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { defer b.mu.Unlock() b.timerStartTime = time.Now() - for _, epInfo := range b.endpoints.Values() { + for _, epInfo := range b.endpoints.All() { epInfo.callCounter.swap() } @@ -669,7 +669,7 @@ func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { b.failurePercentageAlgorithm() } - for _, epInfo := range b.endpoints.Values() { + for _, epInfo := range b.endpoints.All() { if epInfo.latestEjectionTimestamp.IsZero() && epInfo.ejectionTimeMultiplier > 0 { epInfo.ejectionTimeMultiplier-- continue @@ -701,7 +701,7 @@ func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { // Caller must hold b.mu. 
func (b *outlierDetectionBalancer) endpointsWithAtLeastRequestVolume(requestVolume uint32) []*endpointInfo { var endpoints []*endpointInfo - for _, epInfo := range b.endpoints.Values() { + for _, epInfo := range b.endpoints.All() { bucket1 := epInfo.callCounter.inactiveBucket rv := bucket1.numSuccesses + bucket1.numFailures if rv >= requestVolume { diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer.go index 950cd13e65..e842920ffc 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/hierarchy" @@ -153,7 +154,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // The balancing policy name is changed, close the old child. But don't // rebuild, rebuild will happen when syncing priorities. if currentChild.balancerName != bb.Name() { - currentChild.stop() + currentChild.stop(true) currentChild.updateBalancerName(bb.Name()) } @@ -169,7 +170,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // Cleanup resources used by children removed from the config. for name, oldChild := range b.children { if _, ok := newConfig.Children[name]; !ok { - oldChild.stop() + oldChild.stop(!envconfig.EnablePriorityLBChildPolicyCache) delete(b.children, name) } } @@ -230,7 +231,7 @@ func (b *priorityBalancer) Close() { // Stop the child policies, this is necessary to stop the init timers in the // children. 
for _, child := range b.children { - child.stop() + child.stop(true) } } diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_child.go b/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_child.go index 8c09b10117..4cc4ae9880 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_child.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_child.go @@ -126,12 +126,16 @@ func (cb *childBalancer) sendUpdate() { // It doesn't do it directly. It asks the balancer group to remove it. // // Note that the underlying balancer group could keep the child in a cache. -func (cb *childBalancer) stop() { +func (cb *childBalancer) stop(immediate bool) { if !cb.started { return } cb.stopInitTimer() - cb.parent.bg.Remove(cb.name) + if immediate { + cb.parent.bg.RemoveImmediately(cb.name) + } else { + cb.parent.bg.Remove(cb.name) + } cb.started = false cb.state = balancer.State{ ConnectivityState: connectivity.Connecting, diff --git a/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_priority.go index a57df7d142..3181ce2fd4 100644 --- a/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_priority.go +++ b/vendor/google.golang.org/grpc/internal/xds/balancer/priority/balancer_priority.go @@ -128,7 +128,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { b.logger.Warningf("Priority name %q is not found in list of child policies", name) continue } - child.stop() + child.stop(false) } } diff --git a/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/authority.go b/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/authority.go index b8cb78fbdf..3aff20bb43 100644 --- a/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/authority.go @@ 
-784,7 +784,7 @@ func (a *authority) unwatchResource(rType ResourceType, resourceName string, wat // reference to the xdsChannels. if len(a.resources) == 0 { if a.logger.V(2) { - a.logger.Infof("Removing last watch for for any resource type, releasing reference to the xdsChannel") + a.logger.Infof("Removing last watch for any resource type, releasing reference to the xdsChannel") } a.closeXDSChannels() } diff --git a/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/channel.go b/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/channel.go index 9da5eb3618..b4c3e65be0 100644 --- a/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/channel.go +++ b/vendor/google.golang.org/grpc/internal/xds/clients/xdsclient/channel.go @@ -25,6 +25,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" igrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/xds/clients" "google.golang.org/grpc/internal/xds/clients/internal" @@ -253,7 +254,16 @@ func decodeResponse(opts *DecodeOptions, rType *ResourceType, resp response) (ma perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. ret := make(map[string]dataAndErrTuple) // Return result, a map from resource name to either resource data or error. for _, r := range resp.resources { - result, err := rType.Decoder.Decode(NewAnyProto(r), *opts) + result, err := func() (res *DecodeResult, err error) { + defer func() { + if envconfig.XDSRecoverPanicInResourceParsing { + if p := recover(); p != nil { + err = fmt.Errorf("recovered from panic during resource parsing, resource: %v, panic: %v", r, p) + } + } + }() + return rType.Decoder.Decode(NewAnyProto(r), *opts) + }() // Name field of the result is left unpopulated only when resource // deserialization fails. 
diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index 07bb59cee5..e7fb95b816 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -330,41 +330,57 @@ func (upm *urlPathMatcher) match(data *rpcData) bool { type remoteIPMatcher struct { // ipNet represents the CidrRange that this matcher was configured with. // This is what will remote and destination IP's will be matched against. - ipNet *net.IPNet + ipNet netip.Prefix } func newRemoteIPMatcher(cidrRange *v3corepb.CidrRange) (*remoteIPMatcher, error) { // Convert configuration to a cidrRangeString, as Go standard library has // methods that parse cidr string. cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) - _, ipNet, err := net.ParseCIDR(cidrRangeString) + ipNet, err := netip.ParsePrefix(cidrRangeString) if err != nil { return nil, err } - return &remoteIPMatcher{ipNet: ipNet}, nil + return &remoteIPMatcher{ipNet: ipNet.Masked()}, nil } func (sim *remoteIPMatcher) match(data *rpcData) bool { - ip, _ := netip.ParseAddr(data.peerInfo.Addr.String()) - return sim.ipNet.Contains(net.IP(ip.AsSlice())) + host, _, err := net.SplitHostPort(data.peerInfo.Addr.String()) + if err != nil { + // Fallback for addresses without a port. 
+ host = data.peerInfo.Addr.String() + } + ip, err := netip.ParseAddr(host) + if err != nil { + return false + } + return sim.ipNet.Contains(ip) } type localIPMatcher struct { - ipNet *net.IPNet + ipNet netip.Prefix } func newLocalIPMatcher(cidrRange *v3corepb.CidrRange) (*localIPMatcher, error) { cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) - _, ipNet, err := net.ParseCIDR(cidrRangeString) + ipNet, err := netip.ParsePrefix(cidrRangeString) if err != nil { return nil, err } - return &localIPMatcher{ipNet: ipNet}, nil + return &localIPMatcher{ipNet: ipNet.Masked()}, nil } func (dim *localIPMatcher) match(data *rpcData) bool { - ip, _ := netip.ParseAddr(data.localAddr.String()) - return dim.ipNet.Contains(net.IP(ip.AsSlice())) + host, _, err := net.SplitHostPort(data.localAddr.String()) + if err != nil { + // Fallback for addresses without a port. + host = data.localAddr.String() + } + ip, err := netip.ParseAddr(host) + if err != nil { + return false + } + return dim.ipNet.Contains(ip) } // portMatcher matches on whether the destination port of the RPC matches the diff --git a/vendor/google.golang.org/grpc/internal/xds/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/internal/xds/resolver/serviceconfig.go index 40a423f1f1..2846412470 100644 --- a/vendor/google.golang.org/grpc/internal/xds/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/xds/resolver/serviceconfig.go @@ -75,12 +75,15 @@ type xdsClusterManagerConfig struct { // serviceConfigJSON produces a service config in JSON format that contains LB // policy config for the "xds_cluster_manager" LB policy, with entries in the // children map for all active clusters. -func serviceConfigJSON(activeClusters map[string]*clusterInfo) []byte { +func serviceConfigJSON(activeClusters map[string]*clusterInfo, activePlugins map[string]*clusterInfo) []byte { // Generate children (all entries in activeClusters). 
children := make(map[string]xdsChildConfig) for cluster, ci := range activeClusters { children[cluster] = ci.cfg } + for plugin, ci := range activePlugins { + children[plugin] = ci.cfg + } sc := serviceConfig{ LoadBalancingConfig: newBalancerConfig( @@ -156,6 +159,7 @@ type configSelector struct { virtualHost virtualHost routes []route clusters map[string]*clusterInfo + plugins map[string]*clusterInfo httpFilterConfig []xdsresource.HTTPFilter } @@ -194,7 +198,13 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP // Add a ref to the selected cluster, as this RPC needs this cluster until // it is committed. - ref := &cs.clusters[cluster.name].refCount + var ref *int32 + if info, ok := cs.clusters[cluster.name]; ok { + ref = &info.refCount + } + if info, ok := cs.plugins[cluster.name]; ok { + ref = &info.refCount + } atomic.AddInt32(ref, 1) lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name) @@ -209,10 +219,25 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP OnCommitted: func() { // When the RPC is committed, the cluster is no longer required. // Decrease its ref. - if v := atomic.AddInt32(ref, -1); v == 0 { - // This entry will be removed from activeClusters when - // producing the service config for the empty update. - cs.sendNewServiceConfig() + if info, ok := cs.clusters[cluster.name]; ok { + ref := &info.refCount + if v := atomic.AddInt32(ref, -1); v == 0 { + // We call unsubscribe rather than sendNewServiceConfig to + // prevent redundant updates. If the reference count in the + // dependency manager drops to zero, it will automatically + // trigger a service config update with this cluster + // removed. Calling unsubscribe allows the dependency + // manager to handle the update flow once and for all. 
+ info.unsubscribe() + } + } + if info, ok := cs.plugins[cluster.name]; ok { + ref := &info.refCount + if v := atomic.AddInt32(ref, -1); v == 0 { + // This entry will be removed from activePlugins when + // producing a new service config update. + cs.sendNewServiceConfig() + } } }, Interceptor: cluster.interceptor, @@ -311,21 +336,19 @@ func (cs *configSelector) stop() { if cs == nil { return } - // If any refs drop to zero, we'll need a service config update to delete - // the cluster. - needUpdate := false - // Loops over cs.clusters, but these are pointers to entries in - // activeClusters. + // If any reference counts drop to zero, a service config update is required + // to remove the clusters. Since the old config selector is stopped + // after a new one is active, we must trigger a subsequent update to delete + // the now-unused clusters. for _, ci := range cs.clusters { if v := atomic.AddInt32(&ci.refCount, -1); v == 0 { - needUpdate = true + ci.unsubscribe() } } - // We stop the old config selector immediately after sending a new config - // selector; we need another update to delete clusters from the config (if - // we don't have another update pending already). 
- if needUpdate { - cs.sendNewServiceConfig() + for _, ci := range cs.plugins { + if v := atomic.AddInt32(&ci.refCount, -1); v == 0 { + cs.sendNewServiceConfig() + } } } diff --git a/vendor/google.golang.org/grpc/internal/xds/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/internal/xds/resolver/xds_resolver.go index a38b292b00..918ce623ef 100644 --- a/vendor/google.golang.org/grpc/internal/xds/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/internal/xds/resolver/xds_resolver.go @@ -134,6 +134,7 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon xdsClient: client, xdsClientClose: xdsClientClose, activeClusters: make(map[string]*clusterInfo), + activePlugins: make(map[string]*clusterInfo), channelID: rand.Uint64(), ldsResourceName: ldsResourceName, @@ -150,7 +151,21 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon } r.logger = prefixLogger(r) r.logger.Infof("Creating resolver for target: %+v", target) + + dmSet := make(chan struct{}) + // Schedule a callback that blocks until r.dm is set i.e xdsdepmgr.New() + // returns. This acts as a gatekeeper: even if dependency manager sends the + // updates before the xdsdepmgr.New() has a chance to return, they will be + // queued behind this blocker and processed only after initialization is + // complete. + r.serializer.TrySchedule(func(ctx context.Context) { + select { + case <-dmSet: + case <-ctx.Done(): + } + }) r.dm = xdsdepmgr.New(r.ldsResourceName, opts.Authority, r.xdsClient, r) + close(dmSet) return r, nil } @@ -225,8 +240,20 @@ type xdsResolver struct { // callbacks. xdsConfig *xdsresource.XDSConfig // activeClusters is a map from cluster name to information about the - // cluster that includes a ref count and load balancing configuration. - activeClusters map[string]*clusterInfo + // weighted cluster that includes a reference count and load balancing + // configuration. These counts are used only by the resolver. 
The current + // configSelector holds one reference, and each ongoing RPC holds an + // additional reference. When the count hits zero, the resolver removes the + // cluster from this map and calls unsubscribe. This signals the dependency + // manager to stop the xDS watch once its own reference count reaches zero. + activeClusters map[string]*clusterInfo + // activePlugins is a map from cluster specifier plugin name to information + // about the cluster specifier plugin that includes a ref count and load + // balancing configuration. These counts are used only by the resolver. The + // current configSelector holds one reference, and each ongoing RPC holds an + // additional reference. When the count hits zero, the resolver removes the + // plugin name from this map. + activePlugins map[string]*clusterInfo curConfigSelector stoppableConfigSelector } @@ -301,11 +328,9 @@ func (r *xdsResolver) sendNewServiceConfig(cs stoppableConfigSelector) bool { // Delete entries from r.activeClusters with zero references; // otherwise serviceConfigJSON will generate a config including // them. - r.pruneActiveClusters() + r.pruneActiveClustersAndPlugins() - errCS, ok := cs.(*erroringConfigSelector) - if ok && len(r.activeClusters) == 0 { - // There are no clusters and we are sending a failing configSelector. + if errCS, ok := cs.(*erroringConfigSelector); ok { // Send an empty config, which picks pick-first, with no address, and // puts the ClientConn into transient failure. 
// @@ -320,15 +345,17 @@ func (r *xdsResolver) sendNewServiceConfig(cs stoppableConfigSelector) bool { return true } - sc := serviceConfigJSON(r.activeClusters) + sc := serviceConfigJSON(r.activeClusters, r.activePlugins) if r.logger.V(2) { - r.logger.Infof("For Listener resource %q and RouteConfiguration resource %q, generated service config: %+v", r.ldsResourceName, r.xdsConfig.Listener.RouteConfigName, sc) + r.logger.Infof("For Listener resource %q and RouteConfiguration resource %q, generated service config: %+v", r.ldsResourceName, r.xdsConfig.Listener.APIListener.RouteConfigName, sc) } // Send the update to the ClientConn. state := iresolver.SetConfigSelector(resolver.State{ ServiceConfig: r.cc.ParseServiceConfig(string(sc)), }, cs) + state = xdsresource.SetXDSConfig(state, r.xdsConfig) + state = xdsdepmgr.SetXDSClusterSubscriber(state, r.dm) if err := r.cc.UpdateState(xdsclient.SetClient(state, r.xdsClient)); err != nil { if r.logger.V(2) { r.logger.Infof("Channel rejected new state: %+v with error: %v", state, err) @@ -357,7 +384,8 @@ func (r *xdsResolver) newConfigSelector() (*configSelector, error) { }, routes: make([]route, len(r.xdsConfig.VirtualHost.Routes)), clusters: make(map[string]*clusterInfo), - httpFilterConfig: r.xdsConfig.Listener.HTTPFilters, + plugins: make(map[string]*clusterInfo), + httpFilterConfig: r.xdsConfig.Listener.APIListener.HTTPFilters, } for i, rt := range r.xdsConfig.VirtualHost.Routes { @@ -365,13 +393,13 @@ func (r *xdsResolver) newConfigSelector() (*configSelector, error) { if rt.ClusterSpecifierPlugin != "" { clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin clusters.Add(&routeCluster{name: clusterName}, 1) - ci := r.addOrGetActiveClusterInfo(clusterName) + ci := r.addOrGetActiveClusterInfo(clusterName, "") ci.cfg = xdsChildConfig{ChildPolicy: balancerConfig(r.xdsConfig.RouteConfig.ClusterSpecifierPlugins[rt.ClusterSpecifierPlugin])} - cs.clusters[clusterName] = ci + cs.plugins[clusterName] = ci } else 
{ for _, wc := range rt.WeightedClusters { clusterName := clusterPrefix + wc.Name - interceptor, err := newInterceptor(r.xdsConfig.Listener.HTTPFilters, wc.HTTPFilterConfigOverride, rt.HTTPFilterConfigOverride, r.xdsConfig.VirtualHost.HTTPFilterConfigOverride) + interceptor, err := newInterceptor(r.xdsConfig.Listener.APIListener.HTTPFilters, wc.HTTPFilterConfigOverride, rt.HTTPFilterConfigOverride, r.xdsConfig.VirtualHost.HTTPFilterConfigOverride) if err != nil { return nil, err } @@ -379,7 +407,7 @@ func (r *xdsResolver) newConfigSelector() (*configSelector, error) { name: clusterName, interceptor: interceptor, }, int64(wc.Weight)) - ci := r.addOrGetActiveClusterInfo(clusterName) + ci := r.addOrGetActiveClusterInfo(clusterName, wc.Name) ci.cfg = xdsChildConfig{ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: wc.Name})} cs.clusters[clusterName] = ci } @@ -389,7 +417,7 @@ func (r *xdsResolver) newConfigSelector() (*configSelector, error) { cs.routes[i].m = xdsresource.RouteToMatcher(rt) cs.routes[i].actionType = rt.ActionType if rt.MaxStreamDuration == nil { - cs.routes[i].maxStreamDuration = r.xdsConfig.Listener.MaxStreamDuration + cs.routes[i].maxStreamDuration = r.xdsConfig.Listener.APIListener.MaxStreamDuration } else { cs.routes[i].maxStreamDuration = *rt.MaxStreamDuration } @@ -399,34 +427,63 @@ func (r *xdsResolver) newConfigSelector() (*configSelector, error) { cs.routes[i].autoHostRewrite = rt.AutoHostRewrite } - // Account for this config selector's clusters. Do this after no further - // errors may occur. Note: cs.clusters are pointers to entries in + // Account for this config selector's clusters. Do this after no further + // errors may occur. Note: cs.clusters are pointers to entries in // activeClusters. 
for _, ci := range cs.clusters { atomic.AddInt32(&ci.refCount, 1) } + for _, ci := range cs.plugins { + atomic.AddInt32(&ci.refCount, 1) + } return cs, nil } -// pruneActiveClusters deletes entries in r.activeClusters with zero -// references. -func (r *xdsResolver) pruneActiveClusters() { +// pruneActiveClustersAndPlugins removes entries from activeClusters and +// activePlugins that have a reference count of zero. For clusters, it also +// invokes the unsubscribe function to signal the dependency manager to stop the +// xDS watch. Because cluster specifier plugins do not have their own watches, +// they are simply removed from the map without an unsubscribe call. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) pruneActiveClustersAndPlugins() { for cluster, ci := range r.activeClusters { if atomic.LoadInt32(&ci.refCount) == 0 { + ci.unsubscribe() delete(r.activeClusters, cluster) } } + for cluster, ci := range r.activePlugins { + if atomic.LoadInt32(&ci.refCount) == 0 { + delete(r.activePlugins, cluster) + } + } } -func (r *xdsResolver) addOrGetActiveClusterInfo(name string) *clusterInfo { - ci := r.activeClusters[name] - if ci != nil { +// addOrGetActiveClusterInfo returns the clusterInfo for the provided key, +// creating it if it does not exist. It accepts the following parameters: +// - key: Formatted as "cluster:" or "cluster_specifier_plugin:", +// this is the lookup key for the activeClusters or activePlugins maps. +// - name: The actual xDS resource name used to initiate a CDS watch. +// If empty (e.g., for plugins), no resource watch is triggered. +// +// This function manages entry creation and xDS subscriptions but does not +// increment the reference count of the returned clusterInfo. 
+func (r *xdsResolver) addOrGetActiveClusterInfo(key string, name string) *clusterInfo { + if name == "" { + ci, ok := r.activePlugins[key] + if !ok { + ci = &clusterInfo{} + r.activePlugins[key] = ci + } return ci } - - ci = &clusterInfo{refCount: 0} - r.activeClusters[name] = ci + ci, ok := r.activeClusters[key] + if !ok { + ci = &clusterInfo{unsubscribe: r.dm.SubscribeToCluster(name)} + r.activeClusters[key] = ci + } return ci } @@ -436,6 +493,13 @@ type clusterInfo struct { // cfg is the child configuration for this cluster, containing either the // csp config or the cds cluster config. cfg xdsChildConfig + // unsubscribe is the function to call to unsubscribe from this cluster's + // CDS resource. It is populated only for clusters in activeClusters and not + // for cluster specifier plugins. When invoked, it decrements the reference + // count in the dependency manager; once that count reaches zero, the + // underlying CDS watch is terminated. Plugins do not have associated + // watches and therefore do not require an unsubscribe function. + unsubscribe func() } // Contains common functionality to be executed when resources of either type diff --git a/vendor/google.golang.org/grpc/internal/xds/server/conn_wrapper.go b/vendor/google.golang.org/grpc/internal/xds/server/conn_wrapper.go index 840da7ca68..71f5c6a91a 100644 --- a/vendor/google.golang.org/grpc/internal/xds/server/conn_wrapper.go +++ b/vendor/google.golang.org/grpc/internal/xds/server/conn_wrapper.go @@ -28,7 +28,6 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" ) // connWrapper is a thin wrapper around a net.Conn returned by Accept(). It @@ -45,7 +44,7 @@ type connWrapper struct { net.Conn // The specific filter chain picked for handling this connection. 
- filterChain *xdsresource.FilterChain + filterChain *filterChain // A reference to the listenerWrapper on which this connection was accepted. parent *listenerWrapper @@ -67,13 +66,7 @@ type connWrapper struct { // The virtual hosts with matchable routes and instantiated HTTP Filters per // route, or an error. - urc *atomic.Pointer[xdsresource.UsableRouteConfiguration] -} - -// UsableRouteConfiguration returns the UsableRouteConfiguration to be used for -// server side routing. -func (c *connWrapper) UsableRouteConfiguration() xdsresource.UsableRouteConfiguration { - return *c.urc.Load() + urc *atomic.Pointer[usableRouteConfiguration] } // SetDeadline makes a copy of the passed in deadline and forwards the call to @@ -99,7 +92,7 @@ func (c *connWrapper) GetDeadline() time.Time { // configuration for this connection. This method is invoked by the // ServerHandshake() method of the XdsCredentials. func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { - if c.filterChain.SecurityCfg == nil { + if c.filterChain.securityCfg == nil { // If the security config is empty, this means that the control plane // did not provide any security configuration and therefore we should // return an empty HandshakeInfo here so that the xdsCreds can use the @@ -110,7 +103,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs() // Identity provider name is mandatory on the server-side, and this is // enforced when the resource is received at the XDSClient layer. 
- secCfg := c.filterChain.SecurityCfg + secCfg := c.filterChain.securityCfg ip, err := buildProviderFunc(cpc, secCfg.IdentityInstanceName, secCfg.IdentityCertName, true, false) if err != nil { return nil, err diff --git a/vendor/google.golang.org/grpc/internal/xds/server/filter_chain_manager.go b/vendor/google.golang.org/grpc/internal/xds/server/filter_chain_manager.go new file mode 100644 index 0000000000..438c58b91e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/server/filter_chain_manager.go @@ -0,0 +1,414 @@ +/* + * + * Copyright 2026 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package server + +import ( + "errors" + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/httpfilter" + "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" + "google.golang.org/grpc/status" +) + +const ( + // An unspecified destination or source prefix should be considered a less + // specific match than a wildcard prefix, `0.0.0.0/0` or `::/0`. Also, an + // unspecified prefix should match most v4 and v6 addresses compared to the + // wildcard prefixes which match only a specific network (v4 or v6). + // + // We use these constants when looking up the most specific prefix match. 
A + // wildcard prefix will match 0 bits, and to make sure that a wildcard + // prefix is considered a more specific match than an unspecified prefix, we + // use a value of -1 for the latter. + noPrefixMatch = -2 + unspecifiedPrefixMatch = -1 +) + +// filterChainManager contains the match criteria specified through filter +// chains in a single Listener resource. +// +// For the match criteria and the filter chains, we need to use package local +// structs that are very similar to the xdsresource structs. This is because the +// xdsresource structs are meant to contain only configuration and not runtime +// state. Here, we need to store runtime state such as the usable route +// configuration. +type filterChainManager struct { + dstPrefixes []*destPrefixEntry // Linear lookup list. + defaultFilterChain *filterChain // Default filter chain, if specified. + filterChains []*filterChain // Filter chains managed by this filter chain manager. + routeConfigNames map[string]bool // Route configuration names which need to be dynamically queried. 
+} + +func newFilterChainManager(filterChainConfigs *xdsresource.NetworkFilterChainMap, defFilterChainConfig *xdsresource.NetworkFilterChainConfig) *filterChainManager { + fcMgr := &filterChainManager{routeConfigNames: make(map[string]bool)} + + if filterChainConfigs != nil { + for _, entry := range filterChainConfigs.DstPrefixes { + dstEntry := &destPrefixEntry{net: entry.Prefix} + + for i, srcPrefixes := range entry.SourceTypeArr { + if len(srcPrefixes.Entries) == 0 { + continue + } + stDest := &sourcePrefixes{} + dstEntry.srcTypeArr[i] = stDest + for _, srcEntryConfig := range srcPrefixes.Entries { + srcEntry := &sourcePrefixEntry{ + net: srcEntryConfig.Prefix, + srcPortMap: make(map[int]*filterChain, len(srcEntryConfig.PortMap)), + } + stDest.srcPrefixes = append(stDest.srcPrefixes, srcEntry) + + for port, fcConfig := range srcEntryConfig.PortMap { + fc := fcMgr.filterChainFromConfig(&fcConfig) + if fc.routeConfigName != "" { + fcMgr.routeConfigNames[fc.routeConfigName] = true + } + srcEntry.srcPortMap[port] = fc + fcMgr.filterChains = append(fcMgr.filterChains, fc) + } + } + } + fcMgr.dstPrefixes = append(fcMgr.dstPrefixes, dstEntry) + } + } + + if defFilterChainConfig != nil && !defFilterChainConfig.IsEmpty() { + fc := fcMgr.filterChainFromConfig(defFilterChainConfig) + if fc.routeConfigName != "" { + fcMgr.routeConfigNames[fc.routeConfigName] = true + } + fcMgr.defaultFilterChain = fc + fcMgr.filterChains = append(fcMgr.filterChains, fc) + } + + return fcMgr +} + +func (fcm *filterChainManager) filterChainFromConfig(config *xdsresource.NetworkFilterChainConfig) *filterChain { + fc := &filterChain{ + securityCfg: config.SecurityCfg, + routeConfigName: config.HTTPConnMgr.RouteConfigName, + inlineRouteConfig: config.HTTPConnMgr.InlineRouteConfig, + httpFilters: config.HTTPConnMgr.HTTPFilters, + usableRouteConfiguration: &atomic.Pointer[usableRouteConfiguration]{}, // Active state + } + fc.usableRouteConfiguration.Store(&usableRouteConfiguration{}) + return fc 
+} + +// destPrefixEntry contains a destination prefix entry and associated source +// type matchers. +type destPrefixEntry struct { + net *net.IPNet + srcTypeArr sourceTypesArray +} + +// An array for the fixed number of source types that we have. +type sourceTypesArray [3]*sourcePrefixes + +// sourceType specifies the connection source IP match type. +type sourceType int + +const ( + sourceTypeAny sourceType = iota // matches connection attempts from any source + sourceTypeSameOrLoopback // matches connection attempts from the same host + sourceTypeExternal // matches connection attempts from a different host +) + +// sourcePrefixes contains a list of source prefix entries. +type sourcePrefixes struct { + srcPrefixes []*sourcePrefixEntry +} + +// sourcePrefixEntry contains a source prefix entry and associated source port +// matchers. +type sourcePrefixEntry struct { + net *net.IPNet + srcPortMap map[int]*filterChain +} + +// filterChain captures information from within a FilterChain message in a +// Listener resource. This struct contains the active state of a filter chain, +// which includes the usable route configuration. +type filterChain struct { + securityCfg *xdsresource.SecurityConfig + httpFilters []xdsresource.HTTPFilter + routeConfigName string + inlineRouteConfig *xdsresource.RouteConfigUpdate + usableRouteConfiguration *atomic.Pointer[usableRouteConfiguration] +} + +// usableRouteConfiguration contains a matchable route configuration, with +// instantiated HTTP Filters per route. +type usableRouteConfiguration struct { + vhs []virtualHostWithInterceptors + err error + nodeID string // For logging purposes. Populated by the listener wrapper. +} + +// virtualHostWithInterceptors captures information present in a VirtualHost +// update, and also contains routes with instantiated HTTP Filters. 
+type virtualHostWithInterceptors struct { + domains []string + routes []routeWithInterceptors +} + +// routeWithInterceptors captures information in a Route, and contains +// a usable matcher and also instantiated HTTP Filters. +type routeWithInterceptors struct { + matcher *xdsresource.CompositeMatcher + actionType xdsresource.RouteActionType + interceptors []resolver.ServerInterceptor +} + +type lookupParams struct { + isUnspecifiedListener bool // Whether the server is listening on a wildcard address. + dstAddr net.IP // dstAddr is the local address of an incoming connection. + srcAddr net.IP // srcAddr is the remote address of an incoming connection. + srcPort int // srcPort is the remote port of an incoming connection. +} + +// lookup returns the most specific matching filter chain to be used for an +// incoming connection on the server side. Returns a non-nil error if no +// matching filter chain could be found. +func (fcm *filterChainManager) lookup(params lookupParams) (*filterChain, error) { + dstPrefixes := filterByDestinationPrefixes(fcm.dstPrefixes, params.isUnspecifiedListener, params.dstAddr) + if len(dstPrefixes) == 0 { + if fcm.defaultFilterChain != nil { + return fcm.defaultFilterChain, nil + } + return nil, fmt.Errorf("no matching filter chain based on destination prefix match for %+v", params) + } + + srcType := sourceTypeExternal + if params.srcAddr.Equal(params.dstAddr) || params.srcAddr.IsLoopback() { + srcType = sourceTypeSameOrLoopback + } + srcPrefixes := filterBySourceType(dstPrefixes, srcType) + if len(srcPrefixes) == 0 { + if fcm.defaultFilterChain != nil { + return fcm.defaultFilterChain, nil + } + return nil, fmt.Errorf("no matching filter chain based on source type match for %+v", params) + } + srcPrefixEntry, err := filterBySourcePrefixes(srcPrefixes, params.srcAddr) + if err != nil { + return nil, err + } + if fc := filterBySourcePorts(srcPrefixEntry, params.srcPort); fc != nil { + return fc, nil + } + if fcm.defaultFilterChain != 
nil { + return fcm.defaultFilterChain, nil + } + return nil, fmt.Errorf("no matching filter chain after all match criteria for %+v", params) +} + +// filterByDestinationPrefixes is the first stage of the filter chain +// matching algorithm. It takes the complete set of configured filter chain +// matchers and returns the most specific matchers based on the destination +// prefix match criteria (the prefixes which match the most number of bits). +func filterByDestinationPrefixes(dstPrefixes []*destPrefixEntry, isUnspecified bool, dstAddr net.IP) []*destPrefixEntry { + if !isUnspecified { + // Destination prefix matchers are considered only when the listener is + // bound to the wildcard address. + return dstPrefixes + } + + var matchingDstPrefixes []*destPrefixEntry + maxSubnetMatch := noPrefixMatch + for _, prefix := range dstPrefixes { + if prefix.net != nil && !prefix.net.Contains(dstAddr) { + // Skip prefixes which don't match. + continue + } + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. + matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingDstPrefixes = make([]*destPrefixEntry, 0, 1) + } + matchingDstPrefixes = append(matchingDstPrefixes, prefix) + } + return matchingDstPrefixes +} + +// filterBySourceType is the second stage of the matching algorithm. It +// trims the filter chains based on the most specific source type match. +// +// srcType is one of sourceTypeSameOrLoopback or sourceTypeExternal. 
+func filterBySourceType(dstPrefixes []*destPrefixEntry, srcType sourceType) []*sourcePrefixes { + var ( + srcPrefixes []*sourcePrefixes + bestSrcTypeMatch sourceType + ) + for _, prefix := range dstPrefixes { + match := srcType + srcPrefix := prefix.srcTypeArr[srcType] + if srcPrefix == nil { + match = sourceTypeAny + srcPrefix = prefix.srcTypeArr[sourceTypeAny] + } + if match < bestSrcTypeMatch { + continue + } + if match > bestSrcTypeMatch { + bestSrcTypeMatch = match + srcPrefixes = make([]*sourcePrefixes, 0) + } + if srcPrefix != nil { + // The source type array always has 3 entries, but these could be + // nil if the appropriate source type match was not specified. + srcPrefixes = append(srcPrefixes, srcPrefix) + } + } + return srcPrefixes +} + +// filterBySourcePrefixes is the third stage of the filter chain matching +// algorithm. It trims the filter chains based on the source prefix. At most one +// filter chain with the most specific match progress to the next stage. +func filterBySourcePrefixes(srcPrefixes []*sourcePrefixes, srcAddr net.IP) (*sourcePrefixEntry, error) { + var matchingSrcPrefixes []*sourcePrefixEntry + maxSubnetMatch := noPrefixMatch + for _, sp := range srcPrefixes { + for _, prefix := range sp.srcPrefixes { + if prefix.net != nil && !prefix.net.Contains(srcAddr) { + // Skip prefixes which don't match. + continue + } + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. 
+ matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingSrcPrefixes = make([]*sourcePrefixEntry, 0, 1) + } + matchingSrcPrefixes = append(matchingSrcPrefixes, prefix) + } + } + if len(matchingSrcPrefixes) == 0 { + // Finding no match is not an error condition. The caller will end up + // using the default filter chain if one was configured. + return nil, nil + } + if len(matchingSrcPrefixes) != 1 { + // We expect at most a single matching source prefix entry at this + // point. If we have multiple entries here, and some of their source + // port matchers had wildcard entries, we could be left with more than + // one matching filter chain and hence would have been flagged as an + // invalid configuration at config validation time. + return nil, errors.New("multiple matching filter chains") + } + return matchingSrcPrefixes[0], nil +} + +// filterBySourcePorts is the last stage of the filter chain matching +// algorithm. It trims the filter chains based on the source ports. +func filterBySourcePorts(spe *sourcePrefixEntry, srcPort int) *filterChain { + if spe == nil { + return nil + } + // A match could be a wildcard match (this happens when the match + // criteria does not specify source ports) or a specific port match (this + // happens when the match criteria specifies a set of ports and the source + // port of the incoming connection matches one of the specified ports). The + // latter is considered to be a more specific match. + if fc := spe.srcPortMap[srcPort]; fc != nil { + return fc + } + if fc := spe.srcPortMap[0]; fc != nil { + return fc + } + return nil +} + +// constructUsableRouteConfiguration takes Route Configuration and converts it +// into matchable route configuration, with instantiated HTTP Filters per route. 
+func (fc *filterChain) constructUsableRouteConfiguration(config xdsresource.RouteConfigUpdate) *usableRouteConfiguration { + vhs := make([]virtualHostWithInterceptors, 0, len(config.VirtualHosts)) + for _, vh := range config.VirtualHosts { + vhwi, err := fc.convertVirtualHost(vh) + if err != nil { + // Non nil if (lds + rds) fails, shouldn't happen since validated by + // xDS Client, treat as L7 error but shouldn't happen. + return &usableRouteConfiguration{err: fmt.Errorf("virtual host construction: %v", err)} + } + vhs = append(vhs, vhwi) + } + return &usableRouteConfiguration{vhs: vhs} +} + +func (fc *filterChain) convertVirtualHost(virtualHost *xdsresource.VirtualHost) (virtualHostWithInterceptors, error) { + rs := make([]routeWithInterceptors, len(virtualHost.Routes)) + for i, r := range virtualHost.Routes { + rs[i].actionType = r.ActionType + rs[i].matcher = xdsresource.RouteToMatcher(r) + for _, filter := range fc.httpFilters { + // Route is highest priority on server side, as there is no concept + // of an upstream cluster on server side. + override := r.HTTPFilterConfigOverride[filter.Name] + if override == nil { + // Virtual Host is second priority. + override = virtualHost.HTTPFilterConfigOverride[filter.Name] + } + sb, ok := filter.Filter.(httpfilter.ServerInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation. + return virtualHostWithInterceptors{}, fmt.Errorf("filter does not support use in server") + } + si, err := sb.BuildServerInterceptor(filter.Config, override) + if err != nil { + return virtualHostWithInterceptors{}, fmt.Errorf("filter construction: %v", err) + } + if si != nil { + rs[i].interceptors = append(rs[i].interceptors, si) + } + } + } + return virtualHostWithInterceptors{domains: virtualHost.Domains, routes: rs}, nil +} + +// statusErrWithNodeID returns an error produced by the status package with the +// specified code and message, and includes the xDS node ID. 
+func (rc *usableRouteConfiguration) statusErrWithNodeID(c codes.Code, msg string, args ...any) error { + return status.Error(c, fmt.Sprintf("[xDS node id: %v]: %s", rc.nodeID, fmt.Sprintf(msg, args...))) +} diff --git a/vendor/google.golang.org/grpc/internal/xds/server/listener_wrapper.go b/vendor/google.golang.org/grpc/internal/xds/server/listener_wrapper.go index 15f6c17c3a..7304a60cb8 100644 --- a/vendor/google.golang.org/grpc/internal/xds/server/listener_wrapper.go +++ b/vendor/google.golang.org/grpc/internal/xds/server/listener_wrapper.go @@ -143,7 +143,7 @@ type listenerWrapper struct { // Current serving mode. mode connectivity.ServingMode // Filter chain manager currently serving. - activeFilterChainManager *xdsresource.FilterChainManager + activeFilterChainManager *filterChainManager // conns accepted with configuration from activeFilterChainManager. conns map[*connWrapper]bool @@ -153,7 +153,7 @@ type listenerWrapper struct { // Pending filter chain manager. Will go active once rdsHandler has received // all the RDS resources this filter chain manager needs. - pendingFilterChainManager *xdsresource.FilterChainManager + pendingFilterChainManager *filterChainManager // rdsHandler is used for any dynamic RDS resources specified in a LDS // update. @@ -197,19 +197,19 @@ func (l *listenerWrapper) maybeUpdateFilterChains() { func (l *listenerWrapper) handleRDSUpdate(routeName string, rcu rdsWatcherUpdate) { // Update any filter chains that point to this route configuration. if l.activeFilterChainManager != nil { - for _, fc := range l.activeFilterChainManager.FilterChains() { - if fc.RouteConfigName != routeName { + for _, fc := range l.activeFilterChainManager.filterChains { + if fc.routeConfigName != routeName { continue } if rcu.err != nil && rcu.data == nil { // Either NACK before update, or resource not found triggers this conditional. 
- urc := &xdsresource.UsableRouteConfiguration{Err: rcu.err} - urc.NodeID = l.xdsNodeID - fc.UsableRouteConfiguration.Store(urc) + urc := &usableRouteConfiguration{err: rcu.err} + urc.nodeID = l.xdsNodeID + fc.usableRouteConfiguration.Store(urc) continue } - urc := fc.ConstructUsableRouteConfiguration(*rcu.data) - urc.NodeID = l.xdsNodeID - fc.UsableRouteConfiguration.Store(urc) + urc := fc.constructUsableRouteConfiguration(*rcu.data) + urc.nodeID = l.xdsNodeID + fc.usableRouteConfiguration.Store(urc) } } if l.rdsHandler.determineRouteConfigurationReady() { @@ -222,23 +222,23 @@ func (l *listenerWrapper) handleRDSUpdate(routeName string, rcu rdsWatcherUpdate // route configurations, uses that, otherwise uses cached rdsHandler updates. // Must be called within an xDS Client Callback. func (l *listenerWrapper) instantiateFilterChainRoutingConfigurationsLocked() { - for _, fc := range l.activeFilterChainManager.FilterChains() { - if fc.InlineRouteConfig != nil { - urc := fc.ConstructUsableRouteConfiguration(*fc.InlineRouteConfig) - urc.NodeID = l.xdsNodeID - fc.UsableRouteConfiguration.Store(urc) // Can't race with an RPC coming in but no harm making atomic. + for _, fc := range l.activeFilterChainManager.filterChains { + if fc.inlineRouteConfig != nil { + urc := fc.constructUsableRouteConfiguration(*fc.inlineRouteConfig) + urc.nodeID = l.xdsNodeID + fc.usableRouteConfiguration.Store(urc) // Can't race with an RPC coming in but no harm making atomic. continue } // Inline configuration constructed once here, will remain for lifetime of filter chain. 
- rcu := l.rdsHandler.updates[fc.RouteConfigName] + rcu := l.rdsHandler.updates[fc.routeConfigName] if rcu.err != nil && rcu.data == nil { - urc := &xdsresource.UsableRouteConfiguration{Err: rcu.err} - urc.NodeID = l.xdsNodeID - fc.UsableRouteConfiguration.Store(urc) + urc := &usableRouteConfiguration{err: rcu.err} + urc.nodeID = l.xdsNodeID + fc.usableRouteConfiguration.Store(urc) continue } - urc := fc.ConstructUsableRouteConfiguration(*rcu.data) - urc.NodeID = l.xdsNodeID - fc.UsableRouteConfiguration.Store(urc) // Can't race with an RPC coming in but no harm making atomic. + urc := fc.constructUsableRouteConfiguration(*rcu.data) + urc.nodeID = l.xdsNodeID + fc.usableRouteConfiguration.Store(urc) // Can't race with an RPC coming in but no harm making atomic. } } @@ -297,11 +297,11 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { continue } - fc, err := l.activeFilterChainManager.Lookup(xdsresource.FilterChainLookupParams{ - IsUnspecifiedListener: l.isUnspecifiedAddr, - DestAddr: destAddr.IP, - SourceAddr: srcAddr.IP, - SourcePort: srcAddr.Port, + fc, err := l.activeFilterChainManager.lookup(lookupParams{ + isUnspecifiedListener: l.isUnspecifiedAddr, + dstAddr: destAddr.IP, + srcAddr: srcAddr.IP, + srcPort: srcAddr.Port, }) if err != nil { l.mu.Unlock() @@ -320,7 +320,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } - cw := &connWrapper{Conn: conn, filterChain: fc, parent: l, urc: fc.UsableRouteConfiguration} + cw := &connWrapper{Conn: conn, filterChain: fc, parent: l, urc: fc.usableRouteConfiguration} l.conns[cw] = true l.mu.Unlock() return cw, nil @@ -397,7 +397,7 @@ func (lw *ldsWatcher) ResourceChanged(update *xdsresource.ListenerUpdate, onDone lw.logger.Infof("LDS watch for resource %q received update: %#v", lw.name, update) } l := lw.parent - ilc := update.InboundListenerCfg + ilc := update.TCPListener // Make sure that the socket address on the received Listener resource // matches the address of the net.Listener 
passed to us by the user. This // check is done here instead of at the XDSClient layer because of the @@ -420,8 +420,9 @@ func (lw *ldsWatcher) ResourceChanged(update *xdsresource.ListenerUpdate, onDone return } - l.pendingFilterChainManager = ilc.FilterChains - l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames) + fcm := newFilterChainManager(&ilc.FilterChains, &ilc.DefaultFilterChain) + l.pendingFilterChainManager = fcm + l.rdsHandler.updateRouteNamesToWatch(fcm.routeConfigNames) if l.rdsHandler.determineRouteConfigurationReady() { l.maybeUpdateFilterChains() diff --git a/vendor/google.golang.org/grpc/internal/xds/server/routing.go b/vendor/google.golang.org/grpc/internal/xds/server/routing.go new file mode 100644 index 0000000000..9e5c7943fd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/server/routing.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2026 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package server + +import ( + "context" + "errors" + "fmt" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// RouteAndProcess routes the incoming RPC to a configured route in the route +// table and also processes the RPC by running the incoming RPC through any HTTP +// Filters configured. +func RouteAndProcess(ctx context.Context) error { + conn := transport.GetConnection(ctx) + cw, ok := conn.(*connWrapper) + if !ok { + return errors.New("missing virtual hosts in incoming context") + } + + rc := cw.urc.Load() + // Error out at routing l7 level with a status code UNAVAILABLE, represents + // an nack before usable route configuration or resource not found for RDS + // or error combining LDS + RDS (Shouldn't happen). + if rc.err != nil { + if logger.V(2) { + logger.Infof("RPC on connection with xDS Configuration error: %v", rc.err) + } + return status.Error(codes.Unavailable, fmt.Sprintf("error from xDS configuration for matched route configuration: %v", rc.err)) + } + + mn, ok := grpc.Method(ctx) + if !ok { + return errors.New("missing method name in incoming context") + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errors.New("missing metadata in incoming context") + } + // A41 added logic to the core grpc implementation to guarantee that once + // the RPC gets to this point, there will be a single, unambiguous authority + // present in the header map. + authority := md.Get(":authority") + // authority[0] is safe because of the guarantee mentioned above. 
+ vh := findBestMatchingVirtualHostServer(authority[0], rc.vhs) + if vh == nil { + return rc.statusErrWithNodeID(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") + } + + var rwi *routeWithInterceptors + rpcInfo := iresolver.RPCInfo{ + Context: ctx, + Method: mn, + } + for _, r := range vh.routes { + if r.matcher.Match(rpcInfo) { + // "NonForwardingAction is expected for all Routes used on + // server-side; a route with an inappropriate action causes RPCs + // matching that route to fail with UNAVAILABLE." - A36 + if r.actionType != xdsresource.RouteActionNonForwardingAction { + return rc.statusErrWithNodeID(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") + } + rwi = &r + break + } + } + if rwi == nil { + return rc.statusErrWithNodeID(codes.Unavailable, "the incoming RPC did not match a configured Route") + } + for _, interceptor := range rwi.interceptors { + if err := interceptor.AllowRPC(ctx); err != nil { + return rc.statusErrWithNodeID(codes.PermissionDenied, "Incoming RPC is not allowed: %v", err) + } + } + return nil +} + +// findBestMatchingVirtualHostServer returns the virtual host whose domains field best +// matches host +// +// The domains field support 4 different matching pattern types: +// +// - Exact match +// - Suffix match (e.g. “*ABC”) +// - Prefix match (e.g. “ABC*) +// - Universal match (e.g. “*”) +// +// The best match is defined as: +// - A match is better if it’s matching pattern type is better. +// * Exact match > suffix match > prefix match > universal match. +// +// - If two matches are of the same pattern type, the longer match is +// better. +// * This is to compare the length of the matching pattern, e.g. 
“*ABCDE” > +// “*ABC” +func findBestMatchingVirtualHostServer(authority string, vHosts []virtualHostWithInterceptors) *virtualHostWithInterceptors { + var ( + matchVh *virtualHostWithInterceptors + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.domains { + typ, matched := match(domain, authority) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. + continue + } + matchVh = &vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} + +type domainMatchType int + +const ( + domainMatchTypeInvalid domainMatchType = iota + domainMatchTypeUniversal + domainMatchTypePrefix + domainMatchTypeSuffix + domainMatchTypeExact +) + +// Exact > Suffix > Prefix > Universal > Invalid. 
+func (t domainMatchType) betterThan(b domainMatchType) bool { + return t > b +} + +func matchTypeForDomain(d string) domainMatchType { + if d == "" { + return domainMatchTypeInvalid + } + if d == "*" { + return domainMatchTypeUniversal + } + if strings.HasPrefix(d, "*") { + return domainMatchTypeSuffix + } + if strings.HasSuffix(d, "*") { + return domainMatchTypePrefix + } + if strings.Contains(d, "*") { + return domainMatchTypeInvalid + } + return domainMatchTypeExact +} + +func match(domain, host string) (domainMatchType, bool) { + switch typ := matchTypeForDomain(domain); typ { + case domainMatchTypeInvalid: + return typ, false + case domainMatchTypeUniversal: + return typ, true + case domainMatchTypePrefix: + // abc.* + return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) + case domainMatchTypeSuffix: + // *.123 + return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) + case domainMatchTypeExact: + return typ, domain == host + default: + return domainMatchTypeInvalid, false + } +} diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdslbregistry/converter/converter.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdslbregistry/converter/converter.go index d38b9db824..11fcce2c6c 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdslbregistry/converter/converter.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdslbregistry/converter/converter.go @@ -158,7 +158,7 @@ func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, _ int) (json if blackoutPeriodCfg := cswrrProto.GetBlackoutPeriod(); blackoutPeriodCfg != nil { wrrLBCfg.BlackoutPeriod = internalserviceconfig.Duration(blackoutPeriodCfg.AsDuration()) } - if weightExpirationPeriodCfg := cswrrProto.GetBlackoutPeriod(); weightExpirationPeriodCfg != nil { + if weightExpirationPeriodCfg := cswrrProto.GetWeightExpirationPeriod(); weightExpirationPeriodCfg != nil { wrrLBCfg.WeightExpirationPeriod = 
internalserviceconfig.Duration(weightExpirationPeriodCfg.AsDuration()) } if weightUpdatePeriodCfg := cswrrProto.GetWeightUpdatePeriod(); weightUpdatePeriodCfg != nil { diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/filter_chain.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/filter_chain.go index 50b72e0de6..03d538957e 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/filter_chain.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/filter_chain.go @@ -18,593 +18,70 @@ package xdsresource import ( - "errors" "fmt" "net" - "sync/atomic" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/xds/httpfilter" "google.golang.org/grpc/internal/xds/xdsclient/xdsresource/version" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" ) -const ( - // Used as the map key for unspecified prefixes. The actual value of this - // key is immaterial. - unspecifiedPrefixMapKey = "unspecified" - - // An unspecified destination or source prefix should be considered a less - // specific match than a wildcard prefix, `0.0.0.0/0` or `::/0`. Also, an - // unspecified prefix should match most v4 and v6 addresses compared to the - // wildcard prefixes which match only a specific network (v4 or v6). - // - // We use these constants when looking up the most specific prefix match. A - // wildcard prefix will match 0 bits, and to make sure that a wildcard - // prefix is considered a more specific match than an unspecified prefix, we - // use a value of -1 for the latter. 
- noPrefixMatch = -2 - unspecifiedPrefixMatch = -1 -) - -// FilterChain captures information from within a FilterChain message in a -// Listener resource. -type FilterChain struct { +// NetworkFilterChainMap contains the match configuration for network filter +// chains on the server side. It is a multi-level map structure to facilitate +// efficient matching of incoming connections based on destination IP, source +// {type, IP and port}. +type NetworkFilterChainMap struct { + // DstPrefixes is the list of destination prefix entries to match on. + DstPrefixes []DestinationPrefixEntry +} + +// DestinationPrefixEntry contains a destination prefix entry and the associated +// source type matchers. +type DestinationPrefixEntry struct { + // Prefix is the destination IP prefix. + Prefix *net.IPNet + // SourceTypeArr contains the source type matchers. The supported source + // types and their associated indices in the array are: + // - 0: Any: matches connection attempts from any source. + // - 1: SameOrLoopback: matches connection attempts from the same host. + // - 2: External: matches connection attempts from a different host. + SourceTypeArr [3]SourcePrefixes +} + +// SourcePrefixes contains a list of source prefix entries to match on. +type SourcePrefixes struct { + // Entries is the list of source prefix entries. + Entries []SourcePrefixEntry +} + +// SourcePrefixEntry contains a source prefix entry and the associated source +// port matchers. +type SourcePrefixEntry struct { + // Prefix is the source IP prefix. + Prefix *net.IPNet + // PortMap contains the matchers for source ports. + PortMap map[int]NetworkFilterChainConfig +} + +// NetworkFilterChainConfig contains the configuration for a network filter +// chain on the server side. The only support network filter is the HTTP +// connection manager. +type NetworkFilterChainConfig struct { // SecurityCfg contains transport socket security configuration. 
SecurityCfg *SecurityConfig - // HTTPFilters represent the HTTP Filters that comprise this FilterChain. - HTTPFilters []HTTPFilter - // RouteConfigName is the route configuration name for this FilterChain. - // - // Exactly one of RouteConfigName and InlineRouteConfig is set. - RouteConfigName string - // InlineRouteConfig is the inline route configuration (RDS response) - // returned for this filter chain. - // - // Exactly one of RouteConfigName and InlineRouteConfig is set. - InlineRouteConfig *RouteConfigUpdate - // UsableRouteConfiguration is the routing configuration for this filter - // chain (LDS + RDS). - UsableRouteConfiguration *atomic.Pointer[UsableRouteConfiguration] -} - -// VirtualHostWithInterceptors captures information present in a VirtualHost -// update, and also contains routes with instantiated HTTP Filters. -type VirtualHostWithInterceptors struct { - // Domains are the domain names which map to this Virtual Host. On the - // server side, this will be dictated by the :authority header of the - // incoming RPC. - Domains []string - // Routes are the Routes for this Virtual Host. - Routes []RouteWithInterceptors -} - -// RouteWithInterceptors captures information in a Route, and contains -// a usable matcher and also instantiated HTTP Filters. -type RouteWithInterceptors struct { - // M is the matcher used to match to this route. - M *CompositeMatcher - // ActionType is the type of routing action to initiate once matched to. - ActionType RouteActionType - // Interceptors are interceptors instantiated for this route. These will be - // constructed from a combination of the top level configuration and any - // HTTP Filter overrides present in Virtual Host or Route. - Interceptors []resolver.ServerInterceptor -} - -// UsableRouteConfiguration contains a matchable route configuration, with -// instantiated HTTP Filters per route. 
-type UsableRouteConfiguration struct { - VHS []VirtualHostWithInterceptors - Err error - NodeID string // For logging purposes. Populated by the listener wrapper. -} - -// StatusErrWithNodeID returns an error produced by the status package with the -// specified code and message, and includes the xDS node ID. -func (rc *UsableRouteConfiguration) StatusErrWithNodeID(c codes.Code, msg string, args ...any) error { - return status.Error(c, fmt.Sprintf("[xDS node id: %v]: %s", rc.NodeID, fmt.Sprintf(msg, args...))) -} - -// ConstructUsableRouteConfiguration takes Route Configuration and converts it -// into matchable route configuration, with instantiated HTTP Filters per route. -func (fc *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) *UsableRouteConfiguration { - vhs := make([]VirtualHostWithInterceptors, 0, len(config.VirtualHosts)) - for _, vh := range config.VirtualHosts { - vhwi, err := fc.convertVirtualHost(vh) - if err != nil { - // Non nil if (lds + rds) fails, shouldn't happen since validated by - // xDS Client, treat as L7 error but shouldn't happen. - return &UsableRouteConfiguration{Err: fmt.Errorf("virtual host construction: %v", err)} - } - vhs = append(vhs, vhwi) - } - return &UsableRouteConfiguration{VHS: vhs} -} - -func (fc *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostWithInterceptors, error) { - rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) - for i, r := range virtualHost.Routes { - rs[i].ActionType = r.ActionType - rs[i].M = RouteToMatcher(r) - for _, filter := range fc.HTTPFilters { - // Route is highest priority on server side, as there is no concept - // of an upstream cluster on server side. - override := r.HTTPFilterConfigOverride[filter.Name] - if override == nil { - // Virtual Host is second priority. 
- override = virtualHost.HTTPFilterConfigOverride[filter.Name] - } - sb, ok := filter.Filter.(httpfilter.ServerInterceptorBuilder) - if !ok { - // Should not happen if it passed xdsClient validation. - return VirtualHostWithInterceptors{}, fmt.Errorf("filter does not support use in server") - } - si, err := sb.BuildServerInterceptor(filter.Config, override) - if err != nil { - return VirtualHostWithInterceptors{}, fmt.Errorf("filter construction: %v", err) - } - if si != nil { - rs[i].Interceptors = append(rs[i].Interceptors, si) - } - } - } - return VirtualHostWithInterceptors{Domains: virtualHost.Domains, Routes: rs}, nil -} - -// SourceType specifies the connection source IP match type. -type SourceType int - -const ( - // SourceTypeAny matches connection attempts from any source. - SourceTypeAny SourceType = iota - // SourceTypeSameOrLoopback matches connection attempts from the same host. - SourceTypeSameOrLoopback - // SourceTypeExternal matches connection attempts from a different host. - SourceTypeExternal -) - -// FilterChainManager contains all the match criteria specified through all -// filter chains in a single Listener resource. It also contains the default -// filter chain specified in the Listener resource. It provides two important -// pieces of functionality: -// 1. Validate the filter chains in an incoming Listener resource to make sure -// that there aren't filter chains which contain the same match criteria. -// 2. As part of performing the above validation, it builds an internal data -// structure which will if used to look up the matching filter chain at -// connection time. -// -// The logic specified in the documentation around the xDS FilterChainMatch -// proto mentions 8 criteria to match on. -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. 
Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. -type FilterChainManager struct { - // Destination prefix is the first match criteria that we support. - // Therefore, this multi-stage map is indexed on destination prefixes - // specified in the match criteria. - // Unspecified destination prefix matches end up as a wildcard entry here - // with a key of 0.0.0.0/0. - dstPrefixMap map[string]*destPrefixEntry - - // At connection time, we do not have the actual destination prefix to match - // on. We only have the real destination address of the incoming connection. - // This means that we cannot use the above map at connection time. This list - // contains the map entries from the above map that we can use at connection - // time to find matching destination prefixes in O(n) time. - // - // TODO: Implement LC-trie to support logarithmic time lookups. If that - // involves too much time/effort, sort this slice based on the netmask size. - dstPrefixes []*destPrefixEntry - - def *FilterChain // Default filter chain, if specified. - - // Slice of filter chains managed by this filter chain manager. - fcs []*FilterChain - - // RouteConfigNames are the route configuration names which need to be - // dynamically queried for RDS Configuration for any FilterChains which - // specify to load RDS Configuration dynamically. - RouteConfigNames map[string]bool -} - -// destPrefixEntry is the value type of the map indexed on destination prefixes. -type destPrefixEntry struct { - // The actual destination prefix. Set to nil for unspecified prefixes. - net *net.IPNet - // We need to keep track of the transport protocols seen as part of the - // config validation (and internal structure building) phase. The only two - // values that we support are empty string and "raw_buffer", with the latter - // taking preference. 
Once we have seen one filter chain with "raw_buffer", - // we can drop everything other filter chain with an empty transport - // protocol. - rawBufferSeen bool - // For each specified source type in the filter chain match criteria, this - // array points to the set of specified source prefixes. - // Unspecified source type matches end up as a wildcard entry here with an - // index of 0, which actually represents the source type `ANY`. - srcTypeArr sourceTypesArray -} - -// An array for the fixed number of source types that we have. -type sourceTypesArray [3]*sourcePrefixes - -// sourcePrefixes contains source prefix related information specified in the -// match criteria. These are pointed to by the array of source types. -type sourcePrefixes struct { - // These are very similar to the 'dstPrefixMap' and 'dstPrefixes' field of - // FilterChainManager. Go there for more info. - srcPrefixMap map[string]*sourcePrefixEntry - srcPrefixes []*sourcePrefixEntry -} - -// sourcePrefixEntry contains match criteria per source prefix. -type sourcePrefixEntry struct { - // The actual destination prefix. Set to nil for unspecified prefixes. - net *net.IPNet - // Mapping from source ports specified in the match criteria to the actual - // filter chain. Unspecified source port matches en up as a wildcard entry - // here with a key of 0. - srcPortMap map[int]*FilterChain -} - -// NewFilterChainManager parses the received Listener resource and builds a -// FilterChainManager. Returns a non-nil error on validation failures. -// -// This function is only exported so that tests outside of this package can -// create a FilterChainManager. -func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { - // Parse all the filter chains and build the internal data structures. 
- fci := &FilterChainManager{ - dstPrefixMap: make(map[string]*destPrefixEntry), - RouteConfigNames: make(map[string]bool), - } - if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { - return nil, err - } - // Build the source and dest prefix slices used by Lookup(). - fcSeen := false - for _, dstPrefix := range fci.dstPrefixMap { - fci.dstPrefixes = append(fci.dstPrefixes, dstPrefix) - for _, st := range dstPrefix.srcTypeArr { - if st == nil { - continue - } - for _, srcPrefix := range st.srcPrefixMap { - st.srcPrefixes = append(st.srcPrefixes, srcPrefix) - for _, fc := range srcPrefix.srcPortMap { - if fc != nil { - fcSeen = true - } - } - } - } - } - - // Retrieve the default filter chain. The match criteria specified on the - // default filter chain is never used. The default filter chain simply gets - // used when none of the other filter chains match. - var def *FilterChain - if dfc := lis.GetDefaultFilterChain(); dfc != nil { - var err error - if def, err = fci.filterChainFromProto(dfc); err != nil { - return nil, err - } - } - fci.def = def - if fci.def != nil { - fci.fcs = append(fci.fcs, fci.def) - } - - // If there are no supported filter chains and no default filter chain, we - // fail here. This will call the Listener resource to be NACK'ed. - if !fcSeen && fci.def == nil { - return nil, fmt.Errorf("no supported filter chains and no default filter chain") - } - return fci, nil -} - -// addFilterChains parses the filter chains in fcs and adds the required -// internal data structures corresponding to the match criteria. -func (fcm *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) error { - for _, fc := range fcs { - fcMatch := fc.GetFilterChainMatch() - if fcMatch.GetDestinationPort().GetValue() != 0 { - // Destination port is the first match criteria and we do not - // support filter chains which contains this match criteria. 
- logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) - continue - } - - // Build the internal representation of the filter chain match fields. - if err := fcm.addFilterChainsForDestPrefixes(fc); err != nil { - return err - } - } - - return nil -} - -func (fcm *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.FilterChain) error { - ranges := fc.GetFilterChainMatch().GetPrefixRanges() - dstPrefixes := make([]*net.IPNet, 0, len(ranges)) - for _, pr := range ranges { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - _, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("failed to parse destination prefix range: %+v", pr) - } - dstPrefixes = append(dstPrefixes, ipnet) - } - - if len(dstPrefixes) == 0 { - // Use the unspecified entry when destination prefix is unspecified, and - // set the `net` field to nil. - if fcm.dstPrefixMap[unspecifiedPrefixMapKey] == nil { - fcm.dstPrefixMap[unspecifiedPrefixMapKey] = &destPrefixEntry{} - } - return fcm.addFilterChainsForServerNames(fcm.dstPrefixMap[unspecifiedPrefixMapKey], fc) - } - for _, prefix := range dstPrefixes { - p := prefix.String() - if fcm.dstPrefixMap[p] == nil { - fcm.dstPrefixMap[p] = &destPrefixEntry{net: prefix} - } - if err := fcm.addFilterChainsForServerNames(fcm.dstPrefixMap[p], fc); err != nil { - return err - } - } - return nil -} - -func (fcm *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { - // Filter chains specifying server names in their match criteria always fail - // a match at connection time. So, these filter chains can be dropped now. 
- if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { - logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) - return nil - } - - return fcm.addFilterChainsForTransportProtocols(dstEntry, fc) -} - -func (fcm *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { - tp := fc.GetFilterChainMatch().GetTransportProtocol() - switch { - case tp != "" && tp != "raw_buffer": - // Only allow filter chains with transport protocol set to empty string - // or "raw_buffer". - logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) - return nil - case tp == "" && dstEntry.rawBufferSeen: - // If we have already seen filter chains with transport protocol set to - // "raw_buffer", we can drop filter chains with transport protocol set - // to empty string, since the former takes precedence. - logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) - return nil - case tp != "" && !dstEntry.rawBufferSeen: - // This is the first "raw_buffer" that we are seeing. Set the bit and - // reset the source types array which might contain entries for filter - // chains with transport protocol set to empty string. 
- dstEntry.rawBufferSeen = true - dstEntry.srcTypeArr = sourceTypesArray{} - } - return fcm.addFilterChainsForApplicationProtocols(dstEntry, fc) -} - -func (fcm *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { - if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { - logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) - return nil - } - return fcm.addFilterChainsForSourceType(dstEntry, fc) -} - -// addFilterChainsForSourceType adds source types to the internal data -// structures and delegates control to addFilterChainsForSourcePrefixes to -// continue building the internal data structure. -func (fcm *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { - var srcType SourceType - switch st := fc.GetFilterChainMatch().GetSourceType(); st { - case v3listenerpb.FilterChainMatch_ANY: - srcType = SourceTypeAny - case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: - srcType = SourceTypeSameOrLoopback - case v3listenerpb.FilterChainMatch_EXTERNAL: - srcType = SourceTypeExternal - default: - return fmt.Errorf("unsupported source type: %v", st) - } - - st := int(srcType) - if dstEntry.srcTypeArr[st] == nil { - dstEntry.srcTypeArr[st] = &sourcePrefixes{srcPrefixMap: make(map[string]*sourcePrefixEntry)} - } - return fcm.addFilterChainsForSourcePrefixes(dstEntry.srcTypeArr[st].srcPrefixMap, fc) -} - -// addFilterChainsForSourcePrefixes adds source prefixes to the internal data -// structures and delegates control to addFilterChainsForSourcePorts to continue -// building the internal data structure. 
-func (fcm *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, fc *v3listenerpb.FilterChain) error { - ranges := fc.GetFilterChainMatch().GetSourcePrefixRanges() - srcPrefixes := make([]*net.IPNet, 0, len(ranges)) - for _, pr := range fc.GetFilterChainMatch().GetSourcePrefixRanges() { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - _, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("failed to parse source prefix range: %+v", pr) - } - srcPrefixes = append(srcPrefixes, ipnet) - } - - if len(srcPrefixes) == 0 { - // Use the unspecified entry when destination prefix is unspecified, and - // set the `net` field to nil. - if srcPrefixMap[unspecifiedPrefixMapKey] == nil { - srcPrefixMap[unspecifiedPrefixMapKey] = &sourcePrefixEntry{ - srcPortMap: make(map[int]*FilterChain), - } - } - return fcm.addFilterChainsForSourcePorts(srcPrefixMap[unspecifiedPrefixMapKey], fc) - } - for _, prefix := range srcPrefixes { - p := prefix.String() - if srcPrefixMap[p] == nil { - srcPrefixMap[p] = &sourcePrefixEntry{ - net: prefix, - srcPortMap: make(map[int]*FilterChain), - } - } - if err := fcm.addFilterChainsForSourcePorts(srcPrefixMap[p], fc); err != nil { - return err - } - } - return nil + // HTTPConnMgr contains the HTTP connection manager configuration. + HTTPConnMgr *HTTPConnectionManagerConfig } -// addFilterChainsForSourcePorts adds source ports to the internal data -// structures and completes the process of building the internal data structure. -// It is here that we determine if there are multiple filter chains with -// overlapping matching rules. 
-func (fcm *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, fcProto *v3listenerpb.FilterChain) error { - ports := fcProto.GetFilterChainMatch().GetSourcePorts() - srcPorts := make([]int, 0, len(ports)) - for _, port := range ports { - srcPorts = append(srcPorts, int(port)) - } - - fc, err := fcm.filterChainFromProto(fcProto) - if err != nil { - return err - } - - if len(srcPorts) == 0 { - // Use the wildcard port '0', when source ports are unspecified. - if curFC := srcEntry.srcPortMap[0]; curFC != nil { - return errors.New("multiple filter chains with overlapping matching rules are defined") - } - srcEntry.srcPortMap[0] = fc - fcm.fcs = append(fcm.fcs, fc) - return nil - } - for _, port := range srcPorts { - if curFC := srcEntry.srcPortMap[port]; curFC != nil { - return errors.New("multiple filter chains with overlapping matching rules are defined") - } - srcEntry.srcPortMap[port] = fc - } - fcm.fcs = append(fcm.fcs, fc) - return nil -} - -// FilterChains returns the filter chains for this filter chain manager. -func (fcm *FilterChainManager) FilterChains() []*FilterChain { - return fcm.fcs +// IsEmpty returns true if the NetworkFilterChainConfig contains no +// configuration. +func (n NetworkFilterChainConfig) IsEmpty() bool { + return n.SecurityCfg == nil && n.HTTPConnMgr == nil } -// filterChainFromProto extracts the relevant information from the FilterChain -// proto and stores it in our internal representation. It also persists any -// RouteNames which need to be queried dynamically via RDS. -func (fcm *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { - filterChain, err := processNetworkFilters(fc.GetFilters()) - if err != nil { - return nil, err - } - // These route names will be dynamically queried via RDS in the wrapped - // listener, which receives the LDS response, if specified for the filter - // chain. 
- if filterChain.RouteConfigName != "" { - fcm.RouteConfigNames[filterChain.RouteConfigName] = true - } - // If the transport_socket field is not specified, it means that the control - // plane has not sent us any security config. This is fine and the server - // will use the fallback credentials configured as part of the - // xdsCredentials. - ts := fc.GetTransportSocket() - if ts == nil { - return filterChain, nil - } - if name := ts.GetName(); name != transportSocketName { - return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) - } - tc := ts.GetTypedConfig() - if typeURL := tc.GetTypeUrl(); typeURL != version.V3DownstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket missing typed_config or wrong type_url: %q", typeURL) - } - downstreamCtx := &v3tlspb.DownstreamTlsContext{} - if err := proto.Unmarshal(tc.GetValue(), downstreamCtx); err != nil { - return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) - } - if downstreamCtx.GetRequireSni().GetValue() { - return nil, fmt.Errorf("require_sni field set to true in DownstreamTlsContext message: %v", downstreamCtx) - } - if downstreamCtx.GetOcspStaplePolicy() != v3tlspb.DownstreamTlsContext_LENIENT_STAPLING { - return nil, fmt.Errorf("ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message: %v", downstreamCtx) - } - // The following fields from `DownstreamTlsContext` are ignore: - // - disable_stateless_session_resumption - // - session_ticket_keys - // - session_ticket_keys_sds_secret_config - // - session_timeout - if downstreamCtx.GetCommonTlsContext() == nil { - return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") - } - sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext(), true) - if err != nil { - return nil, err - } - if sc == nil { - // sc == nil is a valid case where the control plane has not sent us any - // security configuration. 
xDS creds will use fallback creds. - return filterChain, nil - } - sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() - if sc.RequireClientCert && sc.RootInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") - } - filterChain.SecurityCfg = sc - return filterChain, nil -} - -// Validate takes a function to validate the FilterChains in this manager. -func (fcm *FilterChainManager) Validate(f func(fc *FilterChain) error) error { - for _, dst := range fcm.dstPrefixMap { - for _, srcType := range dst.srcTypeArr { - if srcType == nil { - continue - } - for _, src := range srcType.srcPrefixMap { - for _, fc := range src.srcPortMap { - if err := f(fc); err != nil { - return err - } - } - } - } - } - return f(fcm.def) -} - -func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) { - rc := &UsableRouteConfiguration{} - filterChain := &FilterChain{ - UsableRouteConfiguration: &atomic.Pointer[UsableRouteConfiguration]{}, - } - filterChain.UsableRouteConfiguration.Store(rc) +func processNetworkFilters(filters []*v3listenerpb.Filter) (*HTTPConnectionManagerConfig, error) { + hcmConfig := &HTTPConnectionManagerConfig{} seenNames := make(map[string]bool, len(filters)) seenHCM := false for _, filter := range filters { @@ -661,7 +138,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) } // TODO: Implement terminal filter logic, as per A36. 
- filterChain.HTTPFilters = filters + hcmConfig.HTTPFilters = filters seenHCM = true switch hcm.RouteSpecifier.(type) { case *v3httppb.HttpConnectionManager_Rds: @@ -672,7 +149,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) if name == "" { return nil, fmt.Errorf("empty route_config_name: %+v", hcm) } - filterChain.RouteConfigName = name + hcmConfig.RouteConfigName = name case *v3httppb.HttpConnectionManager_RouteConfig: // "RouteConfiguration validation logic inherits all // previous validations made for client-side usage as RDS @@ -684,7 +161,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } - filterChain.InlineRouteConfig = &routeU + hcmConfig.InlineRouteConfig = &routeU case nil: return nil, fmt.Errorf("no RouteSpecifier: %+v", hcm) default: @@ -698,203 +175,5 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) if !seenHCM { return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters) } - return filterChain, nil -} - -// FilterChainLookupParams wraps parameters to be passed to Lookup. -type FilterChainLookupParams struct { - // IsUnspecified indicates whether the server is listening on a wildcard - // address, "0.0.0.0" for IPv4 and "::" for IPv6. Only when this is set to - // true, do we consider the destination prefixes specified in the filter - // chain match criteria. - IsUnspecifiedListener bool - // DestAddr is the local address of an incoming connection. - DestAddr net.IP - // SourceAddr is the remote address of an incoming connection. - SourceAddr net.IP - // SourcePort is the remote port of an incoming connection. - SourcePort int -} - -// Lookup returns the most specific matching filter chain to be used for an -// incoming connection on the server side. 
-// -// Returns a non-nil error if no matching filter chain could be found or -// multiple matching filter chains were found, and in both cases, the incoming -// connection must be dropped. -func (fcm *FilterChainManager) Lookup(params FilterChainLookupParams) (*FilterChain, error) { - dstPrefixes := filterByDestinationPrefixes(fcm.dstPrefixes, params.IsUnspecifiedListener, params.DestAddr) - if len(dstPrefixes) == 0 { - if fcm.def != nil { - return fcm.def, nil - } - return nil, fmt.Errorf("no matching filter chain based on destination prefix match for %+v", params) - } - - srcType := SourceTypeExternal - if params.SourceAddr.Equal(params.DestAddr) || params.SourceAddr.IsLoopback() { - srcType = SourceTypeSameOrLoopback - } - srcPrefixes := filterBySourceType(dstPrefixes, srcType) - if len(srcPrefixes) == 0 { - if fcm.def != nil { - return fcm.def, nil - } - return nil, fmt.Errorf("no matching filter chain based on source type match for %+v", params) - } - srcPrefixEntry, err := filterBySourcePrefixes(srcPrefixes, params.SourceAddr) - if err != nil { - return nil, err - } - if fc := filterBySourcePorts(srcPrefixEntry, params.SourcePort); fc != nil { - return fc, nil - } - if fcm.def != nil { - return fcm.def, nil - } - return nil, fmt.Errorf("no matching filter chain after all match criteria for %+v", params) -} - -// filterByDestinationPrefixes is the first stage of the filter chain -// matching algorithm. It takes the complete set of configured filter chain -// matchers and returns the most specific matchers based on the destination -// prefix match criteria (the prefixes which match the most number of bits). -func filterByDestinationPrefixes(dstPrefixes []*destPrefixEntry, isUnspecified bool, dstAddr net.IP) []*destPrefixEntry { - if !isUnspecified { - // Destination prefix matchers are considered only when the listener is - // bound to the wildcard address. 
- return dstPrefixes - } - - var matchingDstPrefixes []*destPrefixEntry - maxSubnetMatch := noPrefixMatch - for _, prefix := range dstPrefixes { - if prefix.net != nil && !prefix.net.Contains(dstAddr) { - // Skip prefixes which don't match. - continue - } - // For unspecified prefixes, since we do not store a real net.IPNet - // inside prefix, we do not perform a match. Instead we simply set - // the matchSize to -1, which is less than the matchSize (0) for a - // wildcard prefix. - matchSize := unspecifiedPrefixMatch - if prefix.net != nil { - matchSize, _ = prefix.net.Mask.Size() - } - if matchSize < maxSubnetMatch { - continue - } - if matchSize > maxSubnetMatch { - maxSubnetMatch = matchSize - matchingDstPrefixes = make([]*destPrefixEntry, 0, 1) - } - matchingDstPrefixes = append(matchingDstPrefixes, prefix) - } - return matchingDstPrefixes -} - -// filterBySourceType is the second stage of the matching algorithm. It -// trims the filter chains based on the most specific source type match. -func filterBySourceType(dstPrefixes []*destPrefixEntry, srcType SourceType) []*sourcePrefixes { - var ( - srcPrefixes []*sourcePrefixes - bestSrcTypeMatch int - ) - for _, prefix := range dstPrefixes { - var ( - srcPrefix *sourcePrefixes - match int - ) - switch srcType { - case SourceTypeExternal: - match = int(SourceTypeExternal) - srcPrefix = prefix.srcTypeArr[match] - case SourceTypeSameOrLoopback: - match = int(SourceTypeSameOrLoopback) - srcPrefix = prefix.srcTypeArr[match] - } - if srcPrefix == nil { - match = int(SourceTypeAny) - srcPrefix = prefix.srcTypeArr[match] - } - if match < bestSrcTypeMatch { - continue - } - if match > bestSrcTypeMatch { - bestSrcTypeMatch = match - srcPrefixes = make([]*sourcePrefixes, 0) - } - if srcPrefix != nil { - // The source type array always has 3 entries, but these could be - // nil if the appropriate source type match was not specified. 
- srcPrefixes = append(srcPrefixes, srcPrefix) - } - } - return srcPrefixes -} - -// filterBySourcePrefixes is the third stage of the filter chain matching -// algorithm. It trims the filter chains based on the source prefix. At most one -// filter chain with the most specific match progress to the next stage. -func filterBySourcePrefixes(srcPrefixes []*sourcePrefixes, srcAddr net.IP) (*sourcePrefixEntry, error) { - var matchingSrcPrefixes []*sourcePrefixEntry - maxSubnetMatch := noPrefixMatch - for _, sp := range srcPrefixes { - for _, prefix := range sp.srcPrefixes { - if prefix.net != nil && !prefix.net.Contains(srcAddr) { - // Skip prefixes which don't match. - continue - } - // For unspecified prefixes, since we do not store a real net.IPNet - // inside prefix, we do not perform a match. Instead we simply set - // the matchSize to -1, which is less than the matchSize (0) for a - // wildcard prefix. - matchSize := unspecifiedPrefixMatch - if prefix.net != nil { - matchSize, _ = prefix.net.Mask.Size() - } - if matchSize < maxSubnetMatch { - continue - } - if matchSize > maxSubnetMatch { - maxSubnetMatch = matchSize - matchingSrcPrefixes = make([]*sourcePrefixEntry, 0, 1) - } - matchingSrcPrefixes = append(matchingSrcPrefixes, prefix) - } - } - if len(matchingSrcPrefixes) == 0 { - // Finding no match is not an error condition. The caller will end up - // using the default filter chain if one was configured. - return nil, nil - } - // We expect at most a single matching source prefix entry at this point. If - // we have multiple entries here, and some of their source port matchers had - // wildcard entries, we could be left with more than one matching filter - // chain and hence would have been flagged as an invalid configuration at - // config validation time. 
- if len(matchingSrcPrefixes) != 1 { - return nil, errors.New("multiple matching filter chains") - } - return matchingSrcPrefixes[0], nil -} - -// filterBySourcePorts is the last stage of the filter chain matching -// algorithm. It trims the filter chains based on the source ports. -func filterBySourcePorts(spe *sourcePrefixEntry, srcPort int) *FilterChain { - if spe == nil { - return nil - } - // A match could be a wildcard match (this happens when the match - // criteria does not specify source ports) or a specific port match (this - // happens when the match criteria specifies a set of ports and the source - // port of the incoming connection matches one of the specified ports). The - // latter is considered to be a more specific match. - if fc := spe.srcPortMap[srcPort]; fc != nil { - return fc - } - if fc := spe.srcPortMap[0]; fc != nil { - return fc - } - return nil + return hcmConfig, nil } diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/listener_resource_type.go index 2b9000fdf2..71d529f92f 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/listener_resource_type.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/listener_resource_type.go @@ -81,15 +81,35 @@ func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { } func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { - if lis.InboundListenerCfg == nil || lis.InboundListenerCfg.FilterChains == nil { - return nil - } - return lis.InboundListenerCfg.FilterChains.Validate(func(fc *FilterChain) error { + // Validate Filter Chains. 
+ validateFC := func(fc *NetworkFilterChainConfig) error { if fc == nil { return nil } return securityConfigValidator(bc, fc.SecurityCfg) - }) + } + + if lis.TCPListener == nil { + return nil + } + if err := validateFC(&lis.TCPListener.DefaultFilterChain); err != nil { + return err + } + for _, dst := range lis.TCPListener.FilterChains.DstPrefixes { + for _, srcType := range dst.SourceTypeArr { + if len(srcType.Entries) == 0 { + continue + } + for _, src := range srcType.Entries { + for _, fc := range src.PortMap { + if err := validateFC(&fc); err != nil { + return err + } + } + } + } + } + return nil } // ListenerResourceData is an implementation of the xdsclient.ResourceData diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/matcher.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/matcher.go index cedfa58622..c9202fc31e 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/matcher.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/matcher.go @@ -253,31 +253,3 @@ func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHos } return matchVh } - -// FindBestMatchingVirtualHostServer returns the virtual host whose domains field best -// matches authority. -func FindBestMatchingVirtualHostServer(authority string, vHosts []VirtualHostWithInterceptors) *VirtualHostWithInterceptors { - var ( - matchVh *VirtualHostWithInterceptors - matchType = domainMatchTypeInvalid - matchLen int - ) - for _, vh := range vHosts { - for _, domain := range vh.Domains { - typ, matched := match(domain, authority) - if typ == domainMatchTypeInvalid { - // The rds response is invalid. - return nil - } - if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { - // The previous match has better type, or the previous match has - // better length, or this domain isn't a match. 
- continue - } - matchVh = &vh - matchType = typ - matchLen = len(domain) - } - } - return matchVh -} diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/metadata.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/metadata.go index b56c309d6f..7c0b56e82f 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/metadata.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/metadata.go @@ -22,11 +22,14 @@ import ( "net/netip" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/protobuf/types/known/anypb" ) func init() { - registerMetadataConverter("type.googleapis.com/envoy.config.core.v3.Address", proxyAddressConvertor{}) + if envconfig.XDSHTTPConnectEnabled { + registerMetadataConverter("type.googleapis.com/envoy.config.core.v3.Address", proxyAddressConvertor{}) + } } var ( @@ -53,6 +56,12 @@ func metadataConverterForType(typeURL string) metadataConverter { return metdataRegistry[typeURL] } +// unregisterMetadataConverterForTesting removes a converter from the registry. +// For testing only. +func unregisterMetadataConverterForTesting(typeURL string) { + delete(metdataRegistry, typeURL) +} + // StructMetadataValue stores the values in a google.protobuf.Struct from // FilterMetadata. type StructMetadataValue struct { diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/type_lds.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/type_lds.go index a2e057b0e7..55118d0851 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/type_lds.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/type_lds.go @@ -27,6 +27,17 @@ import ( // ListenerUpdate contains information received in an LDS response, which is of // interest to the registered LDS watcher. 
type ListenerUpdate struct { + // APIListener contains the HTTP connection manager configuration. + APIListener *HTTPConnectionManagerConfig + // TCPListener contains inbound listener configuration. + TCPListener *InboundListenerConfig + + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// HTTPConnectionManagerConfig contains the HTTP connection manager configuration. +type HTTPConnectionManagerConfig struct { // RouteConfigName is the route configuration name corresponding to the // target which is being watched through LDS. // @@ -45,11 +56,6 @@ type ListenerUpdate struct { // HTTPFilters is a list of HTTP filters (name, config) from the LDS // response. HTTPFilters []HTTPFilter - // InboundListenerCfg contains inbound listener configuration. - InboundListenerCfg *InboundListenerConfig - - // Raw is the resource from the xds response. - Raw *anypb.Any } // HTTPFilter represents one HTTP filter from an LDS response's HTTP connection @@ -74,6 +80,10 @@ type InboundListenerConfig struct { // Port is the local port on which the inbound listener is expected to // accept incoming connections. Port string - // FilterChains is the list of filter chains associated with this listener. - FilterChains *FilterChainManager + + // DefaultFilterChain is the default filter chain to use if no other filter + // chain matches. + DefaultFilterChain NetworkFilterChainConfig + // FilterChains contains the filter chains associated with this listener. 
+ FilterChains NetworkFilterChainMap } diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_eds.go index a202c9e022..581cbf6f72 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_eds.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_eds.go @@ -109,6 +109,7 @@ func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropO func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs map[string]bool) ([]Endpoint, error) { endpoints := make([]Endpoint, 0, len(lbEndpoints)) + var totalWeight uint64 for _, lbEndpoint := range lbEndpoints { // If the load_balancing_weight field is specified, it must be set to a // value of at least 1. If unspecified, each host is presumed to have @@ -120,6 +121,12 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs } weight = w.GetValue() } + + totalWeight += uint64(weight) + if totalWeight > math.MaxUint32 { + return nil, fmt.Errorf("sum of weights of endpoints in the same locality exceeds maximum value %d", uint64(math.MaxUint32)) + } + addrs := []string{parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress())} if envconfig.XDSDualstackEndpointsEnabled { for _, sa := range lbEndpoint.GetEndpoint().GetAdditionalAddresses() { @@ -278,7 +285,7 @@ func validateAndConstructMetadata(metadataProto *v3corepb.Metadata) (map[string] // Process FilterMetadata for any keys not already handled. for key, structProto := range metadataProto.GetFilterMetadata() { - // Skip keys already added from TyperFilterMetadata. + // Skip keys already added from TypedFilterMetadata. 
if metadata[key] != nil { continue } diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_lds.go b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_lds.go index 604c086ba4..95816d146c 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_lds.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsclient/xdsresource/unmarshal_lds.go @@ -18,8 +18,10 @@ package xdsresource import ( + "bytes" "errors" "fmt" + "net" "strconv" v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" @@ -27,8 +29,10 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "google.golang.org/grpc/internal/xds/clients/xdsclient" "google.golang.org/grpc/internal/xds/httpfilter" + "google.golang.org/grpc/internal/xds/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -65,8 +69,6 @@ func processListener(lis *v3listenerpb.Listener, opts *xdsclient.DecodeOptions) // processClientSideListener checks if the provided Listener proto meets // the expected criteria. If so, it returns a non-empty routeConfigName. 
func processClientSideListener(lis *v3listenerpb.Listener, opts *xdsclient.DecodeOptions) (*ListenerUpdate, error) { - update := &ListenerUpdate{} - apiLisAny := lis.GetApiListener().GetApiListener() if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) { return nil, fmt.Errorf("unexpected http connection manager resource type: %q", apiLisAny.GetTypeUrl()) @@ -85,6 +87,7 @@ func processClientSideListener(lis *v3listenerpb.Listener, opts *xdsclient.Decod return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) } + hcm := &HTTPConnectionManagerConfig{} switch apiLis.RouteSpecifier.(type) { case *v3httppb.HttpConnectionManager_Rds: if configsource := apiLis.GetRds().GetConfigSource(); configsource.GetAds() == nil && configsource.GetSelf() == nil { @@ -94,13 +97,13 @@ func processClientSideListener(lis *v3listenerpb.Listener, opts *xdsclient.Decod if name == "" { return nil, fmt.Errorf("empty route_config_name: %+v", lis) } - update.RouteConfigName = name + hcm.RouteConfigName = name case *v3httppb.HttpConnectionManager_RouteConfig: routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), opts) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } - update.InlineRouteConfig = &routeU + hcm.InlineRouteConfig = &routeU case nil: return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) default: @@ -109,14 +112,14 @@ func processClientSideListener(lis *v3listenerpb.Listener, opts *xdsclient.Decod // The following checks and fields only apply to xDS protocol versions v3+. 
- update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() + hcm.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() var err error - if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { + if hcm.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { return nil, err } - return update, nil + return &ListenerUpdate{APIListener: hcm}, nil } func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { @@ -263,16 +266,333 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) } lu := &ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ + TCPListener: &InboundListenerConfig{ Address: sockAddr.GetAddress(), Port: strconv.Itoa(int(sockAddr.GetPortValue())), }, } - fcMgr, err := NewFilterChainManager(lis) + // Populate the default filter chain. + if dfc := lis.GetDefaultFilterChain(); dfc != nil { + fc, err := filterChainFromProto(dfc) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal default filter chain: %v", err) + } + lu.TCPListener.DefaultFilterChain = fc + } + + // Populated the filter chain map. + fcm, err := buildFilterChainMap(lis.GetFilterChains()) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to unmarshal filter chains: %v", err) + } + lu.TCPListener.FilterChains = fcm + + // If there are no supported filter chains and no default filter chain, we + // fail here. This will cause the Listener resource to be NACK'ed. 
+ if len(lu.TCPListener.FilterChains.DstPrefixes) == 0 && lu.TCPListener.DefaultFilterChain.IsEmpty() { + return nil, fmt.Errorf("no supported filter chains and no default filter chain") } - lu.InboundListenerCfg.FilterChains = fcMgr + return lu, nil } + +func filterChainFromProto(fc *v3listenerpb.FilterChain) (NetworkFilterChainConfig, error) { + var emptyFilterChain NetworkFilterChainConfig + + hcmConfig, err := processNetworkFilters(fc.GetFilters()) + if err != nil { + return emptyFilterChain, err + } + + fcc := NetworkFilterChainConfig{HTTPConnMgr: hcmConfig} + // If the transport_socket field is not specified, it means that the control + // plane has not sent us any security config. This is fine and the server + // will use the fallback credentials configured as part of the + // xdsCredentials. + ts := fc.GetTransportSocket() + if ts == nil { + return fcc, nil + } + if name := ts.GetName(); name != transportSocketName { + return emptyFilterChain, fmt.Errorf("transport_socket field has unexpected name: %s", name) + } + tc := ts.GetTypedConfig() + if typeURL := tc.GetTypeUrl(); typeURL != version.V3DownstreamTLSContextURL { + return emptyFilterChain, fmt.Errorf("transport_socket missing typed_config or wrong type_url: %q", typeURL) + } + downstreamCtx := &v3tlspb.DownstreamTlsContext{} + if err := proto.Unmarshal(tc.GetValue(), downstreamCtx); err != nil { + return emptyFilterChain, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) + } + if downstreamCtx.GetRequireSni().GetValue() { + return emptyFilterChain, fmt.Errorf("require_sni field set to true in DownstreamTlsContext message: %v", downstreamCtx) + } + if downstreamCtx.GetOcspStaplePolicy() != v3tlspb.DownstreamTlsContext_LENIENT_STAPLING { + return emptyFilterChain, fmt.Errorf("ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message: %v", downstreamCtx) + } + if downstreamCtx.GetCommonTlsContext() == nil { + return emptyFilterChain, 
errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") + } + sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext(), true) + if err != nil { + return emptyFilterChain, err + } + if sc != nil { + sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() + if sc.RequireClientCert && sc.RootInstanceName == "" { + return emptyFilterChain, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") + } + fcc.SecurityCfg = sc + } + return fcc, nil +} + +// dstPrefixEntry wraps DestinationPrefixEntry to track build state. +type dstPrefixEntry struct { + entry DestinationPrefixEntry + rawBufferSeen bool +} + +func buildFilterChainMap(fcs []*v3listenerpb.FilterChain) (NetworkFilterChainMap, error) { + dstPrefixEntries := []*dstPrefixEntry{} + for _, fc := range fcs { + fcMatch := fc.GetFilterChainMatch() + if fcMatch.GetDestinationPort().GetValue() != 0 { + // Destination port is the first match criteria and we do not + // support filter chains that contain this match criteria. 
+ logger.Warningf("Dropping filter chain %q since it contains unsupported destination_port match field", fc.GetName()) + continue + } + + var err error + dstPrefixEntries, err = addFilterChainsForDestPrefixes(dstPrefixEntries, fc) + if err != nil { + return NetworkFilterChainMap{}, err + } + } + + entries := []DestinationPrefixEntry{} + for _, bEntry := range dstPrefixEntries { + fcSeen := false + for _, srcPrefixes := range bEntry.entry.SourceTypeArr { + if len(srcPrefixes.Entries) == 0 { + continue + } + for _, srcPrefix := range srcPrefixes.Entries { + for _, fc := range srcPrefix.PortMap { + if !fc.IsEmpty() { + fcSeen = true + } + } + } + } + if fcSeen { + entries = append(entries, bEntry.entry) + } + } + return NetworkFilterChainMap{DstPrefixes: entries}, nil +} + +func addFilterChainsForDestPrefixes(dstPrefixEntries []*dstPrefixEntry, fc *v3listenerpb.FilterChain) ([]*dstPrefixEntry, error) { + ranges := fc.GetFilterChainMatch().GetPrefixRanges() + dstPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range ranges { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return nil, fmt.Errorf("failed to parse destination prefix range: %+v", pr) + } + dstPrefixes = append(dstPrefixes, ipnet) + } + + var entry *dstPrefixEntry + if len(dstPrefixes) == 0 { + // Use the unspecified entry when destination prefix is unspecified, and + // set the `net` field to nil. 
+ dstPrefixEntries, entry = getOrCreateDestPrefixEntry(dstPrefixEntries, nil) + if err := addFilterChainsForServerNames(entry, fc); err != nil { + return nil, err + } + return dstPrefixEntries, nil + } + for _, prefix := range dstPrefixes { + dstPrefixEntries, entry = getOrCreateDestPrefixEntry(dstPrefixEntries, prefix) + if err := addFilterChainsForServerNames(entry, fc); err != nil { + return nil, err + } + } + return dstPrefixEntries, nil +} + +// getOrCreateDestPrefixEntry looks for an existing dstPrefixEntry in the +// provided slice with the same destination prefix as the provided prefix. If +// such an entry is found, it is returned. Otherwise, a new entry is created and +// appended to the slice, and the new entry is returned. +func getOrCreateDestPrefixEntry(dstPrefixEntries []*dstPrefixEntry, prefix *net.IPNet) ([]*dstPrefixEntry, *dstPrefixEntry) { + for _, e := range dstPrefixEntries { + if ipNetEqual(e.entry.Prefix, prefix) { + return dstPrefixEntries, e + } + } + entry := &dstPrefixEntry{entry: DestinationPrefixEntry{Prefix: prefix}} + dstPrefixEntries = append(dstPrefixEntries, entry) + return dstPrefixEntries, entry +} + +func addFilterChainsForServerNames(dstEntry *dstPrefixEntry, fc *v3listenerpb.FilterChain) error { + // Filter chains specifying server names in their match criteria always fail + // a match at connection time. So, these filter chains can be dropped now. + if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { + logger.Warningf("Dropping filter chain %q since it contains unsupported server_names match field", fc.GetName()) + return nil + } + + return addFilterChainsForTransportProtocols(dstEntry, fc) +} + +func addFilterChainsForTransportProtocols(dstEntry *dstPrefixEntry, fc *v3listenerpb.FilterChain) error { + tp := fc.GetFilterChainMatch().GetTransportProtocol() + switch { + case tp != "" && tp != "raw_buffer": + // Only allow filter chains with transport protocol set to empty string + // or "raw_buffer". 
+ logger.Warningf("Dropping filter chain %q since it contains unsupported value for transport_protocol match field", fc.GetName()) + return nil + case tp == "" && dstEntry.rawBufferSeen: + // If we have already seen filter chains with transport protocol set to + // "raw_buffer", we can drop filter chains with transport protocol set + // to empty string, since the former takes precedence. + logger.Warningf("Dropping filter chain %q since it contains empty string for transport_protocol match field, but one with raw_buffer already exists", fc.GetName()) + return nil + case tp != "" && !dstEntry.rawBufferSeen: + // This is the first "raw_buffer" that we are seeing. Set the bit and + // reset the source types array which might contain entries for filter + // chains with transport protocol set to empty string. + dstEntry.rawBufferSeen = true + dstEntry.entry.SourceTypeArr = [3]SourcePrefixes{} + } + return addFilterChainsForApplicationProtocols(dstEntry, fc) +} + +func addFilterChainsForApplicationProtocols(dstEntry *dstPrefixEntry, fc *v3listenerpb.FilterChain) error { + if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { + logger.Warningf("Dropping filter chain %q since it contains unsupported application_protocols match field", fc.GetName()) + return nil + } + return addFilterChainsForSourceType(&dstEntry.entry, fc) +} + +// sourceType specifies the connection source IP match type. +type sourceType int + +const ( + // sourceTypeAny matches connection attempts from any source. + sourceTypeAny sourceType = iota + // sourceTypeSameOrLoopback matches connection attempts from the same host. + sourceTypeSameOrLoopback + // sourceTypeExternal matches connection attempts from a different host. 
+ sourceTypeExternal +) + +func addFilterChainsForSourceType(entry *DestinationPrefixEntry, fc *v3listenerpb.FilterChain) error { + var srcType sourceType + switch st := fc.GetFilterChainMatch().GetSourceType(); st { + case v3listenerpb.FilterChainMatch_ANY: + srcType = sourceTypeAny + case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: + srcType = sourceTypeSameOrLoopback + case v3listenerpb.FilterChainMatch_EXTERNAL: + srcType = sourceTypeExternal + default: + return fmt.Errorf("unsupported source type: %v", st) + } + + return addFilterChainsForSourcePrefixes(&entry.SourceTypeArr[srcType], fc) +} + +func addFilterChainsForSourcePrefixes(srcPrefixes *SourcePrefixes, fc *v3listenerpb.FilterChain) error { + ranges := fc.GetFilterChainMatch().GetSourcePrefixRanges() + prefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range ranges { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse source prefix range: %+v", pr) + } + prefixes = append(prefixes, ipnet) + } + + if len(prefixes) == 0 { + return getOrCreateSourcePrefixEntry(srcPrefixes, nil, fc) + } + for _, prefix := range prefixes { + if err := getOrCreateSourcePrefixEntry(srcPrefixes, prefix, fc); err != nil { + return err + } + } + return nil +} + +// getOrCreateSourcePrefixEntry looks for an existing SourcePrefixEntry in the +// provided SourcePrefixes with the same source prefix as the provided prefix. If +// such an entry is found, the provided filter chain is added to the entry and +// nil is returned. Otherwise, a new entry is created and appended to the +// SourcePrefixes, the provided filter chain is added to the new entry, and nil +// is returned. If there are multiple filter chains with overlapping matching +// rules, an error is returned. 
+func getOrCreateSourcePrefixEntry(srcPrefixes *SourcePrefixes, prefix *net.IPNet, fc *v3listenerpb.FilterChain) error { + for i := range srcPrefixes.Entries { + if ipNetEqual(srcPrefixes.Entries[i].Prefix, prefix) { + return addFilterChainsForSourcePorts(&srcPrefixes.Entries[i], fc) + } + } + + // Not found, create a new entry. + srcPrefixes.Entries = append(srcPrefixes.Entries, SourcePrefixEntry{ + Prefix: prefix, + PortMap: make(map[int]NetworkFilterChainConfig), + }) + return addFilterChainsForSourcePorts(&srcPrefixes.Entries[len(srcPrefixes.Entries)-1], fc) +} + +func addFilterChainsForSourcePorts(entry *SourcePrefixEntry, fc *v3listenerpb.FilterChain) error { + ports := fc.GetFilterChainMatch().GetSourcePorts() + srcPorts := make([]int, 0, len(ports)) + for _, port := range ports { + srcPorts = append(srcPorts, int(port)) + } + + if len(srcPorts) == 0 { + if !entry.PortMap[0].IsEmpty() { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + fcc, err := filterChainFromProto(fc) + if err != nil { + return err + } + entry.PortMap[0] = fcc + return nil + } + for _, port := range srcPorts { + if !entry.PortMap[port].IsEmpty() { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + fcc, err := filterChainFromProto(fc) + if err != nil { + return err + } + entry.PortMap[port] = fcc + } + return nil +} + +func ipNetEqual(a, b *net.IPNet) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) +} diff --git a/vendor/google.golang.org/grpc/internal/xds/xdsdepmgr/xds_dependency_manager.go b/vendor/google.golang.org/grpc/internal/xds/xdsdepmgr/xds_dependency_manager.go index 4dd792fccc..7ce0218440 100644 --- a/vendor/google.golang.org/grpc/internal/xds/xdsdepmgr/xds_dependency_manager.go +++ b/vendor/google.golang.org/grpc/internal/xds/xdsdepmgr/xds_dependency_manager.go @@ -21,6 +21,7 @@ 
package xdsdepmgr import ( "context" "fmt" + "maps" "net/url" "sync" @@ -37,10 +38,6 @@ const prefix = "[xdsdepmgr %p] " var logger = grpclog.Component("xds") -// EnableClusterAndEndpointsWatch is a flag used to control whether the CDS/EDS -// watchers in the dependency manager should be used. -var EnableClusterAndEndpointsWatch = false - func prefixLogger(p *DependencyManager) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } @@ -87,10 +84,6 @@ func (x *xdsResourceState[T, U]) setLastError(err error) { x.lastUpdate = nil } -func (x *xdsResourceState[T, U]) updateLastError(err error) { - x.lastErr = err -} - type dnsExtras struct { dnsR resolver.Resolver } @@ -120,15 +113,16 @@ type DependencyManager struct { dnsSerializerCancel func() // All the fields below are protected by mu. - mu sync.Mutex - stopped bool - listenerWatcher *xdsResourceState[xdsresource.ListenerUpdate, struct{}] - rdsResourceName string - routeConfigWatcher *xdsResourceState[xdsresource.RouteConfigUpdate, routeExtras] - clustersFromRouteConfig map[string]bool - clusterWatchers map[string]*xdsResourceState[xdsresource.ClusterUpdate, struct{}] - endpointWatchers map[string]*xdsResourceState[xdsresource.EndpointsUpdate, struct{}] - dnsResolvers map[string]*xdsResourceState[xdsresource.DNSUpdate, dnsExtras] + mu sync.Mutex + stopped bool + listenerWatcher *xdsResourceState[xdsresource.ListenerUpdate, struct{}] + rdsResourceName string + routeConfigWatcher *xdsResourceState[xdsresource.RouteConfigUpdate, routeExtras] + clustersFromLastRouteConfig map[string]bool + clusterWatchers map[string]*xdsResourceState[xdsresource.ClusterUpdate, struct{}] + endpointWatchers map[string]*xdsResourceState[xdsresource.EndpointsUpdate, struct{}] + dnsResolvers map[string]*xdsResourceState[xdsresource.DNSUpdate, dnsExtras] + clusterSubscriptions map[string]*clusterRef } // New creates a new DependencyManager. 
@@ -143,22 +137,25 @@ type DependencyManager struct { func New(listenerName, dataplaneAuthority string, xdsClient xdsclient.XDSClient, watcher ConfigWatcher) *DependencyManager { ctx, cancel := context.WithCancel(context.Background()) dm := &DependencyManager{ - ldsResourceName: listenerName, - dataplaneAuthority: dataplaneAuthority, - xdsClient: xdsClient, - watcher: watcher, - nodeID: xdsClient.BootstrapConfig().Node().GetId(), - dnsSerializer: grpcsync.NewCallbackSerializer(ctx), - dnsSerializerCancel: cancel, - clustersFromRouteConfig: make(map[string]bool), - endpointWatchers: make(map[string]*xdsResourceState[xdsresource.EndpointsUpdate, struct{}]), - dnsResolvers: make(map[string]*xdsResourceState[xdsresource.DNSUpdate, dnsExtras]), - clusterWatchers: make(map[string]*xdsResourceState[xdsresource.ClusterUpdate, struct{}]), + ldsResourceName: listenerName, + dataplaneAuthority: dataplaneAuthority, + xdsClient: xdsClient, + watcher: watcher, + nodeID: xdsClient.BootstrapConfig().Node().GetId(), + dnsSerializer: grpcsync.NewCallbackSerializer(ctx), + dnsSerializerCancel: cancel, + clustersFromLastRouteConfig: make(map[string]bool), + endpointWatchers: make(map[string]*xdsResourceState[xdsresource.EndpointsUpdate, struct{}]), + dnsResolvers: make(map[string]*xdsResourceState[xdsresource.DNSUpdate, dnsExtras]), + clusterWatchers: make(map[string]*xdsResourceState[xdsresource.ClusterUpdate, struct{}]), + clusterSubscriptions: make(map[string]*clusterRef), } dm.logger = prefixLogger(dm) - // Start the listener watch. Listener watch will start the other resource - // watches as needed. + // The dependency manager starts by watching the listener resource and + // discovers other resources as required. 
For example, the listener resource + will contain the name of the route configuration resource, which will be + subsequently watched. dm.listenerWatcher = &xdsResourceState[xdsresource.ListenerUpdate, struct{}]{} lw := &xdsResourceWatcher[xdsresource.ListenerUpdate]{ onUpdate: func(update *xdsresource.ListenerUpdate, onDone func()) { @@ -227,18 +224,17 @@ func (m *DependencyManager) maybeSendUpdateLocked() { Clusters: make(map[string]*xdsresource.ClusterResult), } - if !EnableClusterAndEndpointsWatch { - m.watcher.Update(config) - return - } - edsResourcesSeen := make(map[string]bool) dnsResourcesSeen := make(map[string]bool) clusterResourcesSeen := make(map[string]bool) haveAllResources := true - for cluster := range m.clustersFromRouteConfig { - ok, leafClusters, err := m.populateClusterConfigLocked(cluster, 0, config.Clusters, edsResourcesSeen, dnsResourcesSeen, clusterResourcesSeen) - if !ok { + + // Start watches for all clusters. Wait for all the clusters with static + // reference(from route config) to be resolved before sending the update. + for cluster := range m.clusterSubscriptions { + clusterConfig := make(map[string]*xdsresource.ClusterResult) + ok, leafClusters, err := m.populateClusterConfigLocked(cluster, 0, clusterConfig, edsResourcesSeen, dnsResourcesSeen, clusterResourcesSeen) + if !ok && m.clusterSubscriptions[cluster].staticRefCount > 0 { haveAllResources = false } // If there are no leaf clusters, add that as error. @@ -248,6 +244,12 @@ func (m *DependencyManager) maybeSendUpdateLocked() { if err != nil { config.Clusters[cluster] = &xdsresource.ClusterResult{Err: err} } + // Only if all the dependencies for the cluster is resolved, add the + // clusters to the config. This is to ensure we do not send partial + // updates for dynamic clusters. + if ok { + maps.Copy(config.Clusters, clusterConfig) + } } // Cancel resources not seen in the tree.
@@ -448,29 +450,50 @@ func (m *DependencyManager) applyRouteConfigUpdateLocked(update *xdsresource.Rou m.routeConfigWatcher.setLastUpdate(update) m.routeConfigWatcher.extras.virtualHost = matchVH - if EnableClusterAndEndpointsWatch { - // Get the clusters to be watched from the routes in the virtual host. - // If the CLusterSpecifierField is set, we ignore it for now as the - // clusters will be determined dynamically for it. - newClusters := make(map[string]bool) + // Get the clusters to be watched from the routes in the virtual host. + // If the ClusterSpecifierPlugin field is set, we ignore it for now as the + // clusters will be determined dynamically for it. + newClusters := make(map[string]bool) + for _, rt := range matchVH.Routes { + for _, cluster := range rt.WeightedClusters { + newClusters[cluster.Name] = true + } + } - for _, rt := range matchVH.Routes { - for _, cluster := range rt.WeightedClusters { - newClusters[cluster.Name] = true - } + // Add subscriptions for all new clusters. + for cluster := range newClusters { + // If the cluster already has a reference, increase its static + // reference. + if sub, ok := m.clusterSubscriptions[cluster]; ok { + sub.staticRefCount++ + continue } - // Cancel watch for clusters not seen in route config - for name := range m.clustersFromRouteConfig { - if _, ok := newClusters[name]; !ok { - m.clusterWatchers[name].stop() - delete(m.clusterWatchers, name) - } + // If cluster is not present in subscriptions, add it with static + // ref count as 1. + m.clusterSubscriptions[cluster] = &clusterRef{ + staticRefCount: 1, } + } - // Watch for new clusters is started in populateClusterConfigLocked to - // avoid repeating the code. - m.clustersFromRouteConfig = newClusters + // Unsubscribe to clusters from last route config. 
+ for cluster := range m.clustersFromLastRouteConfig { + clusterRef, ok := m.clusterSubscriptions[cluster] + if !ok { + // Should not reach here as the cluster was present in last + // route config so should be present in current cluster + // subscriptions. + continue + } + clusterRef.staticRefCount-- + if clusterRef.staticRefCount == 0 && clusterRef.dynamicRefCount == 0 { + delete(m.clusterSubscriptions, cluster) + } } + m.clustersFromLastRouteConfig = newClusters + + // maybeSendUpdate is called to update the configuration with the new route, + // start watching the newly added clusters and stop watching clusters that + // are not needed anymore. m.maybeSendUpdateLocked() } @@ -489,7 +512,7 @@ func (m *DependencyManager) onListenerResourceUpdate(update *xdsresource.Listene m.listenerWatcher.setLastUpdate(update) - if update.InlineRouteConfig != nil { + if update.APIListener != nil && update.APIListener.InlineRouteConfig != nil { // If there was a previous route config watcher because of a non-inline // route configuration, cancel it. m.rdsResourceName = "" @@ -497,14 +520,18 @@ func (m *DependencyManager) onListenerResourceUpdate(update *xdsresource.Listene m.routeConfigWatcher.stop() } m.routeConfigWatcher = &xdsResourceState[xdsresource.RouteConfigUpdate, routeExtras]{stop: func() {}} - m.applyRouteConfigUpdateLocked(update.InlineRouteConfig) + m.applyRouteConfigUpdateLocked(update.APIListener.InlineRouteConfig) return } // We get here only if there was no inline route configuration. If the route // config name has not changed, send an update with existing route // configuration and the newly received listener configuration. 
- if m.rdsResourceName == update.RouteConfigName { + if update.APIListener == nil { + m.logger.Errorf("Received a listener resource with no api_listener configuration") + return + } + if m.rdsResourceName == update.APIListener.RouteConfigName { m.maybeSendUpdateLocked() return } @@ -513,7 +540,7 @@ func (m *DependencyManager) onListenerResourceUpdate(update *xdsresource.Listene // new one. At this point, since the new route config name has not yet been // resolved, no update is sent to the channel, and therefore the old route // configuration (if received) is used until the new one is received. - m.rdsResourceName = update.RouteConfigName + m.rdsResourceName = update.APIListener.RouteConfigName if m.routeConfigWatcher != nil { m.routeConfigWatcher.stop() } @@ -666,8 +693,8 @@ func (m *DependencyManager) onClusterResourceError(resourceName string, err erro m.maybeSendUpdateLocked() } -// Records the error in the state. The last successful update is retained -// because it should continue to be used as an amnbient error is received. +// Ambient errors from cluster resource are logged and the last successful +// update is retained because it should continue to be used. func (m *DependencyManager) onClusterAmbientError(resourceName string, err error, onDone func()) { m.mu.Lock() defer m.mu.Unlock() @@ -725,24 +752,26 @@ func (m *DependencyManager) onEndpointResourceError(resourceName string, err err return } m.logger.Warningf("Received resource error for Endpoint resource %q: %v", resourceName, m.annotateErrorWithNodeID(err)) - m.endpointWatchers[resourceName].setLastError(err) + // Send an empty EndpointsUpdate instead of nil to avoid nil-check handling + // in the CDS balancer. The priority balancer will handle the case of having + // no endpoints and transition the channel to Transient Failure if needed. 
+ m.endpointWatchers[resourceName].lastUpdate = &xdsresource.EndpointsUpdate{} + m.endpointWatchers[resourceName].lastErr = err + m.endpointWatchers[resourceName].updateReceived = true m.maybeSendUpdateLocked() } -// Records the ambient error without clearing the last successful update, as the -// endpoints should continue to be used. +// Logs the ambient error and does not update the state, as the last successful +// update for endpoints should continue to be used. func (m *DependencyManager) onEndpointAmbientError(resourceName string, err error, onDone func()) { m.mu.Lock() defer m.mu.Unlock() - defer onDone() if m.stopped || m.endpointWatchers[resourceName] == nil { return } m.logger.Warningf("Endpoint resource ambient error %q: %v", resourceName, m.annotateErrorWithNodeID(err)) - m.endpointWatchers[resourceName].updateLastError(err) - m.maybeSendUpdateLocked() } // Converts the DNS resolver state to an internal update, handling address-only @@ -765,10 +794,10 @@ func (m *DependencyManager) onDNSUpdate(resourceName string, update *resolver.St // Records a DNS resolver error. It clears the last update only if no successful // update has been received yet, then triggers a dependency update. // -// If a previous good update was received, the error is recorded but the -// previous update is retained for continued use. Errors are suppressed if a -// resource error was already received, as further propagation would have no -// downstream effect. +// If a previous good update was received, the error is logged and the previous +// update is retained for continued use. Errors are suppressed if a resource +// error was already received, as further propagation would have no downstream +// effect. 
func (m *DependencyManager) onDNSError(resourceName string, err error) { m.mu.Lock() defer m.mu.Unlock() @@ -781,11 +810,15 @@ func (m *DependencyManager) onDNSError(resourceName string, err error) { m.logger.Warningf("%v", err) state := m.dnsResolvers[resourceName] if state.updateReceived { - state.updateLastError(err) return } - state.setLastError(err) + // Send an empty DNSUpdate instead of nil to avoid nil-check handling in the + // CDS balancer. The priority balancer will handle the case of having no + // endpoints and transition the channel to Transient Failure if needed. + state.lastUpdate = &xdsresource.DNSUpdate{} + state.lastErr = err + state.updateReceived = true m.maybeSendUpdateLocked() } @@ -836,7 +869,9 @@ func (m *DependencyManager) newDNSResolver(target string) *xdsResourceState[xdsr err := fmt.Errorf("failed to parse DNS target %q: %v", target, m.annotateErrorWithNodeID(err)) m.logger.Warningf("%v", err) rcc.ReportError(err) - return &xdsResourceState[xdsresource.DNSUpdate, dnsExtras]{} + return &xdsResourceState[xdsresource.DNSUpdate, dnsExtras]{ + stop: func() {}, + } } r, err := resolver.Get("dns").Build(resolver.Target{URL: *u}, rcc, resolver.BuildOptions{}) @@ -844,7 +879,9 @@ func (m *DependencyManager) newDNSResolver(target string) *xdsResourceState[xdsr rcc.ReportError(err) err := fmt.Errorf("failed to build DNS resolver for target %q: %v", target, m.annotateErrorWithNodeID(err)) m.logger.Warningf("%v", err) - return nil + return &xdsResourceState[xdsresource.DNSUpdate, dnsExtras]{ + stop: func() {}, + } } return &xdsResourceState[xdsresource.DNSUpdate, dnsExtras]{ @@ -872,3 +909,100 @@ func (x *xdsResourceWatcher[T]) ResourceError(err error, onDone func()) { func (x *xdsResourceWatcher[T]) AmbientError(err error, onDone func()) { x.onAmbientError(err, onDone) } + +// xdsClusterSubscriberKey is the type used as the key to store the +// ClusterSubscriber interface in the Attributes field of resolver.states. 
+type xdsClusterSubscriberKey struct{} + +// SetXDSClusterSubscriber returns a copy of state in which the Attributes field +// is updated with the ClusterSubscriber interface. +func SetXDSClusterSubscriber(state resolver.State, subs ClusterSubscriber) resolver.State { + state.Attributes = state.Attributes.WithValue(xdsClusterSubscriberKey{}, subs) + return state +} + +// XDSClusterSubscriberFromResolverState returns ClusterSubscriber interface +// stored as an attribute in the resolver state. +func XDSClusterSubscriberFromResolverState(state resolver.State) ClusterSubscriber { + if v := state.Attributes.Value(xdsClusterSubscriberKey{}); v != nil { + return v.(ClusterSubscriber) + } + return nil +} + +// ClusterSubscriber allows dynamic subscription to clusters. This is useful for +// scenarios where the cluster name was not present in the RouteConfiguration, +// e.g. when the route uses a ClusterSpecifierPlugin. +// +// The xDS resolver will pass this interface to the LB policies as an attribute +// in the resolver update. +type ClusterSubscriber interface { + // SubscribeToCluster creates a dynamic subscription for the named cluster. + // + // The returned cancel function must be called when the subscription is no + // longer needed. It is safe to call cancel multiple times. + SubscribeToCluster(clusterName string) (cancel func()) +} + +// clusterRef represents a reference to a cluster and maintains reference count +// of the number of users of the cluster. +type clusterRef struct { + // Access to these field is protected by DependencyManager's mutex and so + // they don't need to be atomic. + + // staticRefCount comes from cluster being specified in the route + // configuration. + staticRefCount int32 + // dynamicRefCount comes from cluster being referenced by RPCs or being + // dynamically subscribed by the balancers in case of cluster specifier + // plugin being used. 
+ dynamicRefCount int32 +} + +// SubscribeToCluster increments the reference count for the cluster. If the +// cluster is not already being tracked, it is added to the clusterSubscriptions +// map. It returns a function to unsubscribe from the cluster i.e. decrease its +// refcount. This returned function is idempotent, meaning it can be called +// multiple times without any additional effect. Calling Subscribe in a blocking +// manner while handling an update will lead to a deadlock. +func (m *DependencyManager) SubscribeToCluster(name string) func() { + m.mu.Lock() + defer m.mu.Unlock() + + // If the cluster is already being tracked, increment its dynamic refcount + // and return. + subs, ok := m.clusterSubscriptions[name] + if ok { + subs.dynamicRefCount++ + return sync.OnceFunc(func() { m.unsubscribeFromCluster(name) }) + } + + // If the cluster is not being tracked, add it with dynamic refcount as 1. + m.clusterSubscriptions[name] = &clusterRef{ + dynamicRefCount: 1, + } + m.maybeSendUpdateLocked() + return sync.OnceFunc(func() { m.unsubscribeFromCluster(name) }) +} + +// unsubscribeFromCluster decrements the reference count for the cluster. If +// both the reference counts reaches zero, it removes the cluster from the +// clusterSubscriptions map in the DependencyManager. Calling +// unsubscribeFromCluster in a blocking manner while handling an update will +// lead to a deadlock. +func (m *DependencyManager) unsubscribeFromCluster(name string) { + m.mu.Lock() + defer m.mu.Unlock() + c := m.clusterSubscriptions[name] + c.dynamicRefCount-- + // This should not happen as unsubscribe returned from the + // ClusterSubscription is wrapped in sync.OnceFunc() + if c.dynamicRefCount < 0 { + m.logger.Errorf("Reference count for a cluster dropped below zero") + } + if c.dynamicRefCount == 0 && c.staticRefCount == 0 { + delete(m.clusterSubscriptions, name) + // Since this cluster has no more references, cancel the watch for it. 
+ m.maybeSendUpdateLocked() + } +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go index 2ea763a49a..3b02b90916 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -19,10 +19,10 @@ package mem import ( - "sort" - "sync" + "fmt" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/mem" ) // BufferPool is a pool of buffers that can be shared and reused, resulting in @@ -38,20 +38,23 @@ type BufferPool interface { Put(*[]byte) } -const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2. - -var defaultBufferPoolSizes = []int{ - 256, - goPageSize, - 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) - 32 << 10, // 32KB (default buffer size for io.Copy) - 1 << 20, // 1MB -} - -var defaultBufferPool BufferPool +var ( + defaultBufferPoolSizeExponents = []uint8{ + 8, + 12, // Go page size, 4KB + 14, // 16KB (max HTTP/2 frame size used by gRPC) + 15, // 32KB (default buffer size for io.Copy) + 20, // 1MB + } + defaultBufferPool BufferPool +) func init() { - defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + var err error + defaultBufferPool, err = NewBinaryTieredBufferPool(defaultBufferPoolSizeExponents...) + if err != nil { + panic(fmt.Sprintf("Failed to create default buffer pool: %v", err)) + } internal.SetDefaultBufferPool = func(pool BufferPool) { defaultBufferPool = pool @@ -72,134 +75,22 @@ func DefaultBufferPool() BufferPool { // NewTieredBufferPool returns a BufferPool implementation that uses multiple // underlying pools of the given pool sizes. 
func NewTieredBufferPool(poolSizes ...int) BufferPool { - sort.Ints(poolSizes) - pools := make([]*sizedBufferPool, len(poolSizes)) - for i, s := range poolSizes { - pools[i] = newSizedBufferPool(s) - } - return &tieredBufferPool{ - sizedPools: pools, - } -} - -// tieredBufferPool implements the BufferPool interface with multiple tiers of -// buffer pools for different sizes of buffers. -type tieredBufferPool struct { - sizedPools []*sizedBufferPool - fallbackPool simpleBufferPool -} - -func (p *tieredBufferPool) Get(size int) *[]byte { - return p.getPool(size).Get(size) + return mem.NewTieredBufferPool(poolSizes...) } -func (p *tieredBufferPool) Put(buf *[]byte) { - p.getPool(cap(*buf)).Put(buf) +// NewBinaryTieredBufferPool returns a BufferPool backed by multiple sub-pools. +// This structure enables O(1) lookup time for Get and Put operations. +// +// The arguments provided are the exponents for the buffer capacities (powers +// of 2), not the raw byte sizes. For example, to create a pool of 16KB buffers +// (2^14 bytes), pass 14 as the argument. +func NewBinaryTieredBufferPool(powerOfTwoExponents ...uint8) (BufferPool, error) { + return mem.NewBinaryTieredBufferPool(powerOfTwoExponents...) } -func (p *tieredBufferPool) getPool(size int) BufferPool { - poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { - return p.sizedPools[i].defaultSize >= size - }) - - if poolIdx == len(p.sizedPools) { - return &p.fallbackPool - } - - return p.sizedPools[poolIdx] -} - -// sizedBufferPool is a BufferPool implementation that is optimized for specific -// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size -// of 16kb and a sizedBufferPool can be configured to only return buffers with a -// capacity of 16kb. Note that however it does not support returning larger -// buffers and in fact panics if such a buffer is requested. 
Because of this, -// this BufferPool implementation is not meant to be used on its own and rather -// is intended to be embedded in a tieredBufferPool such that Get is only -// invoked when the required size is smaller than or equal to defaultSize. -type sizedBufferPool struct { - pool sync.Pool - defaultSize int -} - -func (p *sizedBufferPool) Get(size int) *[]byte { - buf, ok := p.pool.Get().(*[]byte) - if !ok { - buf := make([]byte, size, p.defaultSize) - return &buf - } - b := *buf - clear(b[:cap(b)]) - *buf = b[:size] - return buf -} - -func (p *sizedBufferPool) Put(buf *[]byte) { - if cap(*buf) < p.defaultSize { - // Ignore buffers that are too small to fit in the pool. Otherwise, when - // Get is called it will panic as it tries to index outside the bounds - // of the buffer. - return - } - p.pool.Put(buf) -} - -func newSizedBufferPool(size int) *sizedBufferPool { - return &sizedBufferPool{ - defaultSize: size, - } -} - -var _ BufferPool = (*simpleBufferPool)(nil) - -// simpleBufferPool is an implementation of the BufferPool interface that -// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to -// acquire a buffer from the pool but if that buffer is too small, it returns it -// to the pool and creates a new one. -type simpleBufferPool struct { - pool sync.Pool -} - -func (p *simpleBufferPool) Get(size int) *[]byte { - bs, ok := p.pool.Get().(*[]byte) - if ok && cap(*bs) >= size { - clear((*bs)[:cap(*bs)]) - *bs = (*bs)[:size] - return bs - } - - // A buffer was pulled from the pool, but it is too small. Put it back in - // the pool and create one large enough. - if ok { - p.pool.Put(bs) - } - - // If we're going to allocate, round up to the nearest page. This way if - // requests frequently arrive with small variation we don't allocate - // repeatedly if we get unlucky and they increase over time. By default we - // only allocate here if size > 1MiB. Because goPageSize is a power of 2, we - // can round up efficiently. 
- allocSize := (size + goPageSize - 1) & ^(goPageSize - 1) - - b := make([]byte, size, allocSize) - return &b -} - -func (p *simpleBufferPool) Put(buf *[]byte) { - p.pool.Put(buf) -} - -var _ BufferPool = NopBufferPool{} - // NopBufferPool is a buffer pool that returns new buffers without pooling. -type NopBufferPool struct{} - -// Get returns a buffer with specified length from the pool. -func (NopBufferPool) Get(length int) *[]byte { - b := make([]byte, length) - return &b +type NopBufferPool struct { + mem.NopBufferPool } -// Put returns a buffer to the pool. -func (NopBufferPool) Put(*[]byte) { -} +var _ BufferPool = NopBufferPool{} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index aa52bfe95f..0183ab22f4 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -192,7 +192,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } - logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + if logger.V(2) { + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + } // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index c3c15ac96f..789a5abab6 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,6 +20,7 @@ package resolver import ( "encoding/base64" + "iter" "sort" "strings" ) @@ -135,6 +136,7 @@ func (a *AddressMapV2[T]) Len() int { } // Keys returns a slice of all current map keys. +// Deprecated: Use AddressMapV2.All() instead. 
func (a *AddressMapV2[T]) Keys() []Address { ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { @@ -146,6 +148,7 @@ func (a *AddressMapV2[T]) Keys() []Address { } // Values returns a slice of all current map values. +// Deprecated: Use AddressMapV2.All() instead. func (a *AddressMapV2[T]) Values() []T { ret := make([]T, 0, a.Len()) for _, entryList := range a.m { @@ -156,6 +159,19 @@ func (a *AddressMapV2[T]) Values() []T { return ret } +// All returns an iterator over all elements. +func (a *AddressMapV2[T]) All() iter.Seq2[Address, T] { + return func(yield func(Address, T) bool) { + for _, entryList := range a.m { + for _, entry := range entryList { + if !yield(entry.addr, entry.value) { + return + } + } + } + } +} + type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the @@ -223,6 +239,7 @@ func (em *EndpointMap[T]) Len() int { // the unordered set of addresses. Thus, endpoint information returned is not // the full endpoint data (drops duplicated addresses and attributes) but can be // used for EndpointMap accesses. +// Deprecated: Use EndpointMap.All() instead. func (em *EndpointMap[T]) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) for _, en := range em.endpoints { @@ -232,6 +249,7 @@ func (em *EndpointMap[T]) Keys() []Endpoint { } // Values returns a slice of all current map values. +// Deprecated: Use EndpointMap.All() instead. func (em *EndpointMap[T]) Values() []T { ret := make([]T, 0, len(em.endpoints)) for _, val := range em.endpoints { @@ -240,6 +258,22 @@ func (em *EndpointMap[T]) Values() []T { return ret } +// All returns an iterator over all elements. +// The map keys are endpoints specifying the addresses present in the endpoint +// map, in which uniqueness is determined by the unordered set of addresses. +// Thus, endpoint information returned is not the full endpoint data (drops +// duplicated addresses and attributes) but can be used for EndpointMap +// accesses. 
+func (em *EndpointMap[T]) All() iter.Seq2[Endpoint, T] { + return func(yield func(Endpoint, T) bool) { + for _, en := range em.endpoints { + if !yield(en.decodedKey, en.value) { + return + } + } + } +} + // Delete removes the specified endpoint from the map. func (em *EndpointMap[T]) Delete(e Endpoint) { en := encodeEndpoint(e) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 8160f94304..ee7f7dead1 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -961,24 +961,32 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM return out, nil } -// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor. -// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data -// does not exceed the specified maximum size and returns an error if this limit is exceeded. -// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit. +// decompress processes the given data by decompressing it using either +// a custom decompressor or a standard compressor. If a custom decompressor +// is provided, it takes precedence. The function validates that +// the decompressed data does not exceed the specified maximum size and returns +// an error if this limit is exceeded. On success, it returns the decompressed +// data. Otherwise, it returns an error if decompression fails or the data +// exceeds the size limit. 
func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) { if dc != nil { - uncompressed, err := dc.Do(d.Reader()) + r := d.Reader() + uncompressed, err := dc.Do(r) if err != nil { + r.Close() // ensure buffers are reused return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if len(uncompressed) > maxReceiveMessageSize { + r.Close() // ensure buffers are reused return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize) } return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil } if compressor != nil { - dcReader, err := compressor.Decompress(d.Reader()) + r := d.Reader() + dcReader, err := compressor.Decompress(r) if err != nil { + r.Close() // ensure buffers are reused return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) } @@ -990,11 +998,13 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress } out, err := mem.ReadAll(dcReader, pool) if err != nil { + r.Close() // ensure buffers are reused out.Free() return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) } if out.Len() > maxReceiveMessageSize { + r.Close() // ensure buffers are reused out.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8efb29a7b9..5229adf711 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -192,6 +192,7 @@ var defaultServerOptions = serverOptions{ maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, + sharedWriteBuffer: 
true, readBufferSize: defaultReadBufSize, bufferPool: mem.DefaultBufferPool(), } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 76c2eed773..12f649dcb7 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.79.3" +const Version = "1.80.0" diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go index d49396cec1..e48f809d02 100644 --- a/vendor/google.golang.org/grpc/xds/server.go +++ b/vendor/google.golang.org/grpc/xds/server.go @@ -25,21 +25,15 @@ import ( "net" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - iresolver "google.golang.org/grpc/internal/resolver" istats "google.golang.org/grpc/internal/stats" - "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/internal/xds/server" "google.golang.org/grpc/internal/xds/xdsclient" - "google.golang.org/grpc/internal/xds/xdsclient/xdsresource" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" ) const serverPrefix = "[xds-server %p] " @@ -236,78 +230,10 @@ func (s *GRPCServer) GracefulStop() { } } -// routeAndProcess routes the incoming RPC to a configured route in the route -// table and also processes the RPC by running the incoming RPC through any HTTP -// Filters configured. 
-func routeAndProcess(ctx context.Context) error { - conn := transport.GetConnection(ctx) - cw, ok := conn.(interface { - UsableRouteConfiguration() xdsresource.UsableRouteConfiguration - }) - if !ok { - return errors.New("missing virtual hosts in incoming context") - } - - rc := cw.UsableRouteConfiguration() - // Error out at routing l7 level with a status code UNAVAILABLE, represents - // an nack before usable route configuration or resource not found for RDS - // or error combining LDS + RDS (Shouldn't happen). - if rc.Err != nil { - if logger.V(2) { - logger.Infof("RPC on connection with xDS Configuration error: %v", rc.Err) - } - return status.Error(codes.Unavailable, fmt.Sprintf("error from xDS configuration for matched route configuration: %v", rc.Err)) - } - - mn, ok := grpc.Method(ctx) - if !ok { - return errors.New("missing method name in incoming context") - } - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return errors.New("missing metadata in incoming context") - } - // A41 added logic to the core grpc implementation to guarantee that once - // the RPC gets to this point, there will be a single, unambiguous authority - // present in the header map. - authority := md.Get(":authority") - vh := xdsresource.FindBestMatchingVirtualHostServer(authority[0], rc.VHS) - if vh == nil { - return rc.StatusErrWithNodeID(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") - } - - var rwi *xdsresource.RouteWithInterceptors - rpcInfo := iresolver.RPCInfo{ - Context: ctx, - Method: mn, - } - for _, r := range vh.Routes { - if r.M.Match(rpcInfo) { - // "NonForwardingAction is expected for all Routes used on - // server-side; a route with an inappropriate action causes RPCs - // matching that route to fail with UNAVAILABLE." 
- A36 - if r.ActionType != xdsresource.RouteActionNonForwardingAction { - return rc.StatusErrWithNodeID(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") - } - rwi = &r - break - } - } - if rwi == nil { - return rc.StatusErrWithNodeID(codes.Unavailable, "the incoming RPC did not match a configured Route") - } - for _, interceptor := range rwi.Interceptors { - if err := interceptor.AllowRPC(ctx); err != nil { - return rc.StatusErrWithNodeID(codes.PermissionDenied, "Incoming RPC is not allowed: %v", err) - } - } - return nil -} - // xdsUnaryInterceptor is the unary interceptor added to the gRPC server to // perform any xDS specific functionality on unary RPCs. func xdsUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { - if err := routeAndProcess(ctx); err != nil { + if err := server.RouteAndProcess(ctx); err != nil { return nil, err } return handler(ctx, req) @@ -316,7 +242,7 @@ func xdsUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, // xdsStreamInterceptor is the stream interceptor added to the gRPC server to // perform any xDS specific functionality on streaming RPCs. 
func xdsStreamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if err := routeAndProcess(ss.Context()); err != nil { + if err := server.RouteAndProcess(ss.Context()); err != nil { return err } return handler(srv, ss) diff --git a/vendor/modules.txt b/vendor/modules.txt index 0007e0c113..afb842ef41 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -99,7 +99,7 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 +# github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 ## explicit; go 1.24.0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp # github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 @@ -1618,7 +1618,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envco go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.42.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform @@ -1632,7 +1632,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.42.0 +# 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal @@ -1686,8 +1686,8 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.9.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/proto/otlp v1.10.0 +## explicit; go 1.24.0 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 @@ -1907,8 +1907,8 @@ google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/googleapis/type/latlng google.golang.org/genproto/googleapis/type/timeofday google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 -## explicit; go 1.24.0 +# google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 +## explicit; go 1.25.0 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/distribution @@ -1917,12 +1917,12 @@ google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres -# google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 ## explicit; go 1.25.0 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.79.3 +# google.golang.org/grpc v1.80.0 ## explicit; go 1.24.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1997,6 +1997,7 @@ 
google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/hierarchy google.golang.org/grpc/internal/idle +google.golang.org/grpc/internal/mem google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/proto/grpc_lookup_v1 @@ -2020,7 +2021,6 @@ google.golang.org/grpc/internal/xds/balancer google.golang.org/grpc/internal/xds/balancer/cdsbalancer google.golang.org/grpc/internal/xds/balancer/clusterimpl google.golang.org/grpc/internal/xds/balancer/clustermanager -google.golang.org/grpc/internal/xds/balancer/clusterresolver google.golang.org/grpc/internal/xds/balancer/loadstore google.golang.org/grpc/internal/xds/balancer/outlierdetection google.golang.org/grpc/internal/xds/balancer/priority