diff --git a/go.mod b/go.mod
index f89fcd338..ea81b5eab 100644
--- a/go.mod
+++ b/go.mod
@@ -4,9 +4,9 @@ go 1.25.0
require (
github.com/coredns/caddy v1.1.4
- github.com/coredns/coredns v1.13.2
+ github.com/coredns/coredns v1.14.1
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
- github.com/miekg/dns v1.1.69
+ github.com/miekg/dns v1.1.70
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.38.3
github.com/prometheus/client_golang v1.23.2
@@ -17,10 +17,10 @@ require (
go.etcd.io/etcd/client/pkg/v3 v3.6.7
go.etcd.io/etcd/client/v2 v2.305.26
go.etcd.io/etcd/client/v3 v3.6.7
- golang.org/x/net v0.48.0
- k8s.io/api v0.34.2
+ golang.org/x/net v0.49.0
+ k8s.io/api v0.34.3
k8s.io/apimachinery v0.35.0
- k8s.io/client-go v0.34.2
+ k8s.io/client-go v0.34.3
k8s.io/component-base v0.34.2
k8s.io/klog/v2 v2.130.1
k8s.io/kubernetes v1.34.2
@@ -38,13 +38,12 @@ require (
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.71.0 // indirect
github.com/DataDog/datadog-go/v5 v5.6.0 // indirect
- github.com/DataDog/dd-trace-go/v2 v2.4.0 // indirect
- github.com/DataDog/go-libddwaf/v4 v4.6.1 // indirect
+ github.com/DataDog/dd-trace-go/v2 v2.5.0 // indirect
+ github.com/DataDog/go-libddwaf/v4 v4.8.0 // indirect
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633 // indirect
github.com/DataDog/go-sqllexer v0.1.8 // indirect
github.com/DataDog/go-tuf v1.1.1-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.7 // indirect
- github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -70,13 +69,12 @@ require (
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
- github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -103,11 +101,11 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
- github.com/prometheus/common v0.67.4 // indirect
+ github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
- github.com/quic-go/quic-go v0.57.0 // indirect
+ github.com/quic-go/quic-go v0.59.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.8-0.20250809033336-ffcdc2b7662f // indirect
github.com/spf13/cobra v1.9.1 // indirect
@@ -120,9 +118,10 @@ require (
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/collector/component v1.39.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.39.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.46.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.133.0 // indirect
- go.opentelemetry.io/collector/pdata v1.39.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.46.0 // indirect
+ go.opentelemetry.io/collector/pdata/pprofile v0.140.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/log v0.13.0 // indirect
@@ -135,21 +134,21 @@ require (
go.uber.org/zap v1.27.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/crypto v0.46.0 // indirect
+ golang.org/x/crypto v0.47.0 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
- golang.org/x/mod v0.30.0 // indirect
- golang.org/x/oauth2 v0.33.0 // indirect
+ golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.19.0 // indirect
- golang.org/x/sys v0.39.0 // indirect
- golang.org/x/term v0.38.0 // indirect
- golang.org/x/text v0.32.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/term v0.39.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
- golang.org/x/tools v0.39.0 // indirect
+ golang.org/x/tools v0.40.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 // indirect
- google.golang.org/grpc v1.77.0 // indirect
- google.golang.org/protobuf v1.36.10 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
+ google.golang.org/grpc v1.78.0 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
diff --git a/go.sum b/go.sum
index fbf5be985..beb2672df 100644
--- a/go.sum
+++ b/go.sum
@@ -18,10 +18,10 @@ github.com/DataDog/datadog-agent/pkg/version v0.71.0 h1:jqkKmhFrhHSLpiC3twQFDCXU
github.com/DataDog/datadog-agent/pkg/version v0.71.0/go.mod h1:FYj51C1ib86rpr5tlLEep9jitqvljIJ5Uz2rrimGTeY=
github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw=
github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
-github.com/DataDog/dd-trace-go/v2 v2.4.0 h1:ZbH4h/S5p0tH5Prr4EIo5QwEnXXGUNtxJcOwB7c1d1o=
-github.com/DataDog/dd-trace-go/v2 v2.4.0/go.mod h1:EEOkhOJlb37u+k07/9cwKCvtDC/mWjWnHrGkkk/iZCo=
-github.com/DataDog/go-libddwaf/v4 v4.6.1 h1:wGUioRkQ2a5MYr2wTn5uZfMENbLV4uKXrkr6zCVInCs=
-github.com/DataDog/go-libddwaf/v4 v4.6.1/go.mod h1:/AZqP6zw3qGJK5mLrA0PkfK3UQDk1zCI2fUNCt4xftE=
+github.com/DataDog/dd-trace-go/v2 v2.5.0 h1:Tp4McT135WhbdT/6BYcAoRvl5gH7YKzehSo6Q3uuxBM=
+github.com/DataDog/dd-trace-go/v2 v2.5.0/go.mod h1:A9rVmQfyzYUFCctFdKkli9us7G/YhXlMICpQ958wJUA=
+github.com/DataDog/go-libddwaf/v4 v4.8.0 h1:m6Bl1lS2RtVN4MtdTYhR5vJ2fWQ3WmNy4FiNBpzrp6w=
+github.com/DataDog/go-libddwaf/v4 v4.8.0/go.mod h1:/AZqP6zw3qGJK5mLrA0PkfK3UQDk1zCI2fUNCt4xftE=
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633 h1:ZRLR9Lbym748e8RznWzmSoK+OfV+8qW6SdNYA4/IqdA=
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633/go.mod h1:YFoTl1xsMzdSRFIu33oCSPS/3+HZAPGpO3oOM96wXCM=
github.com/DataDog/go-sqllexer v0.1.8 h1:ku9DpghFHeyyviR28W/3R4cCJwzpsuC08YIoltnx5ds=
@@ -52,8 +52,8 @@ github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
github.com/coredns/caddy v1.1.4 h1:+Lls5xASB0QsA2jpCroCOwpPlb5GjIGlxdjXxdX0XVo=
github.com/coredns/caddy v1.1.4/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/coredns v1.13.2 h1:PPICLK9DBxPC2sGp/tbTL9B4JkyHHIIek9JeWNuIwUw=
-github.com/coredns/coredns v1.13.2/go.mod h1:0mSg9IzNSCdf8vAcnzf1HNAKwXNElrrcoljOEVpT3uk=
+github.com/coredns/coredns v1.14.1 h1:U7ZvMsMn3IfXhaiEHKkW0wsCKG4H5dPvWyMeSLhAodM=
+github.com/coredns/coredns v1.14.1/go.mod h1:oYbISnKw+U930dyDU+VVJ+VCWpRD/frU7NfHlqeqH7U=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
@@ -111,8 +111,6 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -143,8 +141,8 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
@@ -180,8 +178,8 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.69 h1:Kb7Y/1Jo+SG+a2GtfoFUfDkG//csdRPwRLkCsxDG9Sc=
-github.com/miekg/dns v1.1.69/go.mod h1:7OyjD9nEba5OkqQ/hB4fy3PIoxafSZJtducccIelz3g=
+github.com/miekg/dns v1.1.70 h1:DZ4u2AV35VJxdD9Fo9fIWm119BsQL5cZU1cQ9s0LkqA=
+github.com/miekg/dns v1.1.70/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/minio/simdjson-go v0.4.5 h1:r4IQwjRGmWCQ2VeMc7fGiilu1z5du0gJ/I/FsKwgo5A=
github.com/minio/simdjson-go v0.4.5/go.mod h1:eoNz0DcLQRyEDeaPr4Ru6JpjlZPzbA0IodxVJk8lO8E=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -236,16 +234,16 @@ github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
-github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
-github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
+github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
-github.com/quic-go/quic-go v0.57.0 h1:AsSSrrMs4qI/hLrKlTH/TGQeTMY0ib1pAOX7vA3AdqE=
-github.com/quic-go/quic-go v0.57.0/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s=
+github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
+github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
@@ -323,14 +321,14 @@ go.opentelemetry.io/collector/consumer/consumertest v0.133.0 h1:MteqaGpgmHVHFqnB
go.opentelemetry.io/collector/consumer/consumertest v0.133.0/go.mod h1:vHGknLn/RRUcMQuuBDt+SgrpDN46DBJyqRnWXm3gLwY=
go.opentelemetry.io/collector/consumer/xconsumer v0.133.0 h1:Xx4Yna/We4qDlbAla1nfxgkvujzWRuR8bqqwsLLvYSg=
go.opentelemetry.io/collector/consumer/xconsumer v0.133.0/go.mod h1:he874Md/0uAS2Fs+TDHAy10OBLRSw8233LdREizVvG4=
-go.opentelemetry.io/collector/featuregate v1.39.0 h1:OlXZWW+WUP8cgKh2mnwgWXUJO/29irb0hG6jvwscRKM=
-go.opentelemetry.io/collector/featuregate v1.39.0/go.mod h1:A72x92glpH3zxekaUybml1vMSv94BH6jQRn5+/htcjw=
+go.opentelemetry.io/collector/featuregate v1.46.0 h1:z3JlymFdWW6aDo9cYAJ6bCqT+OI2DlurJ9P8HqfuKWQ=
+go.opentelemetry.io/collector/featuregate v1.46.0/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4=
go.opentelemetry.io/collector/internal/telemetry v0.133.0 h1:YxbckZC9HniNOZgnSofTOe0AB/bEsmISNdQeS+3CU3o=
go.opentelemetry.io/collector/internal/telemetry v0.133.0/go.mod h1:akUK7X6ZQ+CbbCjyXLv9y/EHt5jIy+J+nGoLvndZN14=
-go.opentelemetry.io/collector/pdata v1.39.0 h1:jr0f033o57Hpbj2Il8M15tPbvrOgY/Aoc+/+sxzhSFU=
-go.opentelemetry.io/collector/pdata v1.39.0/go.mod h1:jmolu6zwqNaq8qJ4IgCpNWBEwJNPLE1qqOz9GnpqxME=
-go.opentelemetry.io/collector/pdata/pprofile v0.133.0 h1:ewFYqV2FU4D0ixTdkJueaI2JGCoeiIJisX8EdHejDi8=
-go.opentelemetry.io/collector/pdata/pprofile v0.133.0/go.mod h1:5l4/B0iCxzoVkA7eOLzIHV0AUEO2IKypTHTLq9JKsHs=
+go.opentelemetry.io/collector/pdata v1.46.0 h1:XzhnIWNtc/gbOyFiewRvybR4s3phKHrWxL3yc/wVLDo=
+go.opentelemetry.io/collector/pdata v1.46.0/go.mod h1:D2e3BWCUC/bUg29WNzCDVN7Ab0Gzk7hGXZL2pnrDOn0=
+go.opentelemetry.io/collector/pdata/pprofile v0.140.0 h1:b9TZ6UnyzsT/ERQw2VKGi/NYLtKSmjG7cgQuc9wZt5s=
+go.opentelemetry.io/collector/pdata/pprofile v0.140.0/go.mod h1:/2s/YBWGbu+r8MuKu5zas08iSqe+3P6xnbRpfE2DWAA=
go.opentelemetry.io/collector/pdata/testdata v0.133.0 h1:K0q47qecWVJf0sWbeWfifbJ72TiqR+A2PCsMkCEKvus=
go.opentelemetry.io/collector/pdata/testdata v0.133.0/go.mod h1:/emFpIox/mi7FucvsSn54KsiMh/iy7BUviqgURNVT6U=
go.opentelemetry.io/collector/pipeline v1.39.0 h1:CcEn30qdoHEzehFxgx0Ma0pWYGhrrIkRkcu218NG4V4=
@@ -359,12 +357,12 @@ go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/slim/otlp v1.7.1 h1:lZ11gEokjIWYM3JWOUrIILr2wcf6RX+rq5SPObV9oyc=
-go.opentelemetry.io/proto/slim/otlp v1.7.1/go.mod h1:uZ6LJWa49eNM/EXnnvJGTTu8miokU8RQdnO980LJ57g=
-go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.0.1 h1:Tr/eXq6N7ZFjN+THBF/BtGLUz8dciA7cuzGRsCEkZ88=
-go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.0.1/go.mod h1:riqUmAOJFDFuIAzZu/3V6cOrTyfWzpgNJnG5UwrapCk=
-go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.0.1 h1:z/oMlrCv3Kopwh/dtdRagJy+qsRRPA86/Ux3g7+zFXM=
-go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.0.1/go.mod h1:C7EHYSIiaALi9RnNORCVaPCQDuJgJEn/XxkctaTez1E=
+go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE=
+go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -385,16 +383,16 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
-golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
-golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -403,10 +401,10 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
-golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
-golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
-golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -437,15 +435,15 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
-golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
-golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
-golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -455,8 +453,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
-golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
+golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
+golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -467,20 +465,20 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
-google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 h1:Wgl1rcDNThT+Zn47YyCXOXyX/COgMTIdhJ717F0l4xk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
-google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
-google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
+google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
+google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
-google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -499,12 +497,12 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
-k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
+k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
+k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
-k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
-k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
+k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
+k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ=
k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
diff --git a/rules.mk b/rules.mk
index e8f2dafe3..37e281323 100644
--- a/rules.mk
+++ b/rules.mk
@@ -33,7 +33,7 @@ ALL_ARCH := amd64 arm arm64 ppc64le s390x
# Find the latest hash by opening in browser https://gcr.io/distroless/static-debian12:latest
BASEIMAGE ?= gcr.io/distroless/static-debian12@sha256:4b2a093ef4649bccd586625090a3c668b254cfe180dee54f4c94f3e9bd7e381e
# Find the correct tag at https://github.com/kubernetes/release/blob/master/images/build/distroless-iptables/variants.yaml
-IPTIMAGE ?= registry.k8s.io/build-image/distroless-iptables:v0.8.6@sha256:4e0a77d0973618ce2a76e65fa2dc97694eb690ac8baf69cefe6e20f17957d9dd
+IPTIMAGE ?= registry.k8s.io/build-image/distroless-iptables:v0.8.8@sha256:cb9c6a556c5ba13fd1442e27a73ba5b43a35bec87f05962c2285b865cd7f5bee
# These rules MUST be expanded at reference time (hence '=') as BINARY
# is dynamically scoped.
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_nooptracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_nooptracer.go
new file mode 100644
index 000000000..96ff052ed
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/civisibility_nooptracer.go
@@ -0,0 +1,90 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracer
+
+import (
+ "github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
+ "github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants"
+ "github.com/DataDog/dd-trace-go/v2/internal/log"
+)
+
+var _ Tracer = (*ciVisibilityNoopTracer)(nil)
+
+// ciVisibilityNoopTracer is an implementation of Tracer that is no-op for non CiVisibility spans
+// the usage of this tracer is limited to scenarios where the actual noop Tracer is used in the tests.
+// For those cases we don't want to change the behaviour, so we need to act like a noop one.
+// This scenario should be opt-in because with this we lose context propagation and child spans.
+type ciVisibilityNoopTracer struct {
+ Tracer
+}
+
+// wrapWithCiVisibilityNoopTracer creates a wrapped version of the Tracer that only accepts CiVisibility spans
+func wrapWithCiVisibilityNoopTracer(tracer Tracer) *ciVisibilityNoopTracer {
+ return &ciVisibilityNoopTracer{
+ Tracer: tracer,
+ }
+}
+
+// StartSpan implements Tracer.
+func (t *ciVisibilityNoopTracer) StartSpan(operationName string, opts ...StartSpanOption) *Span {
+ if opts != nil {
+ cfg := NewStartSpanConfig(opts...)
+ if cfg != nil && cfg.Tags != nil {
+ // Let's check if the span is a CIVisibility span.
+ // If yes, we create the span.
+ // If not, we just behave like a noop tracer.
+ if v, ok := cfg.Tags[ext.SpanType]; ok {
+ if v == constants.SpanTypeTest ||
+ v == constants.SpanTypeTestSuite ||
+ v == constants.SpanTypeTestModule ||
+ v == constants.SpanTypeTestSession {
+ return t.Tracer.StartSpan(operationName, []StartSpanOption{useConfig(cfg)}...)
+ }
+ }
+ }
+ }
+ log.Debug("CI Visibility tracer is behaving like a noop tracer, so the span will be skipped.")
+ return nil
+}
+
+// SetServiceInfo implements Tracer.
+func (t *ciVisibilityNoopTracer) SetServiceInfo(_, _, _ string) {}
+
+// Extract implements Tracer.
+func (t *ciVisibilityNoopTracer) Extract(_ interface{}) (*SpanContext, error) {
+ return nil, nil
+}
+
+// Inject implements Tracer.
+func (t *ciVisibilityNoopTracer) Inject(_ *SpanContext, _ interface{}) error { return nil }
+
+// Stop implements Tracer.
+func (t *ciVisibilityNoopTracer) Stop() {
+ t.Tracer.Stop()
+}
+
+func (t *ciVisibilityNoopTracer) TracerConf() TracerConf {
+ return t.Tracer.TracerConf()
+}
+
+func (t *ciVisibilityNoopTracer) Flush() {
+ t.Tracer.Flush()
+}
+
+func useConfig(config *StartSpanConfig) StartSpanOption {
+ return func(cfg *StartSpanConfig) {
+ if config == nil {
+ return
+ }
+
+ cfg.Parent = config.Parent
+ cfg.StartTime = config.StartTime
+ cfg.Tags = config.Tags
+ cfg.SpanID = config.SpanID
+ cfg.Context = config.Context
+ cfg.SpanLinks = config.SpanLinks
+ }
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/log.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/log.go
index 9e4148b7f..71dc2e9f7 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/log.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/log.go
@@ -130,7 +130,7 @@ func logStartup(t *tracer) {
Env: t.config.env,
Service: t.config.serviceName,
AgentURL: agentURL,
- Debug: t.config.debug,
+ Debug: t.config.internalConfig.Debug(),
AnalyticsEnabled: !math.IsNaN(globalconfig.AnalyticsRate()),
SampleRate: fmt.Sprintf("%f", t.rulesSampling.traces.globalRate),
SampleRateLimit: "disabled",
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/metrics.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/metrics.go
index f6cf9f3b5..2ecf9e942 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/metrics.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/metrics.go
@@ -93,26 +93,29 @@ func (t *tracer) reportHealthMetricsAtInterval(interval time.Duration) {
for {
select {
case <-ticker.C:
- // if there are started spans, report the number of spans with their integration, then
- // reset the count
- // the Count() function reports the total number of event occurrences in one time interval. We reset
- // our count to 0 regardless of if Count succeeded to cleanup before the next interval.
-
- for k, v := range t.spansStarted.GetAndReset() {
- t.statsd.Count("datadog.tracer.spans_started", v, []string{"integration:" + k}, 1)
- }
-
- // if there are finished spans, report the number of spans with their integration, then
- // reset the count
- // the Count() function reports the total number of event occurrences in one time interval. We reset
- // our count to 0 regardless of if Count succeeded to cleanup before the next interval.
- for k, v := range t.spansFinished.GetAndReset() {
- t.statsd.Count("datadog.tracer.spans_finished", v, []string{"integration:" + k}, 1)
- }
-
- t.statsd.Count("datadog.tracer.traces_dropped", int64(tracerstats.Count(tracerstats.TracesDropped)), []string{"reason:trace_too_large"}, 1)
+ t.reportHealthMetrics()
case <-t.stop:
return
}
}
}
+
+func (t *tracer) reportHealthMetrics() {
+ // if there are started spans, report the number of spans with their integration, then
+ // reset the count
+ // the Count() function reports the total number of event occurrences in one time interval. We reset
+ // our count to 0 regardless of if Count succeeded to cleanup before the next interval.
+ for k, v := range t.spansStarted.GetAndReset() {
+ t.statsd.Count("datadog.tracer.spans_started", v, []string{"integration:" + k}, 1)
+ }
+
+ // if there are finished spans, report the number of spans with their integration, then
+ // reset the count
+ // the Count() function reports the total number of event occurrences in one time interval. We reset
+ // our count to 0 regardless of if Count succeeded to cleanup before the next interval.
+ for k, v := range t.spansFinished.GetAndReset() {
+ t.statsd.Count("datadog.tracer.spans_finished", v, []string{"integration:" + k}, 1)
+ }
+
+ t.statsd.Count("datadog.tracer.traces_dropped", int64(tracerstats.Count(tracerstats.TracesDropped)), []string{"reason:trace_too_large"}, 1)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/option.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/option.go
index 05ff11677..0ce39ee3a 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/option.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/option.go
@@ -6,6 +6,7 @@
package tracer
import (
+ "cmp"
"context"
"encoding/json"
"errors"
@@ -33,6 +34,7 @@ import (
"github.com/DataDog/dd-trace-go/v2/internal"
appsecconfig "github.com/DataDog/dd-trace-go/v2/internal/appsec/config"
"github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants"
+ internalconfig "github.com/DataDog/dd-trace-go/v2/internal/config"
"github.com/DataDog/dd-trace-go/v2/internal/env"
"github.com/DataDog/dd-trace-go/v2/internal/globalconfig"
llmobsconfig "github.com/DataDog/dd-trace-go/v2/internal/llmobs/config"
@@ -97,6 +99,7 @@ var contribIntegrations = map[string]struct {
"k8s.io/client-go/kubernetes": {"Kubernetes", false},
"github.com/labstack/echo/v4": {"echo v4", false},
"log/slog": {"log/slog", false},
+ "github.com/mark3labs/mcp-go": {"MCP", false},
"github.com/miekg/dns": {"miekg/dns", false},
"net/http": {"HTTP", false},
"gopkg.in/olivere/elastic.v5": {"Elasticsearch v5", false},
@@ -138,8 +141,8 @@ const (
// config holds the tracer configuration.
type config struct {
- // debug, when true, writes details to logs.
- debug bool
+ // internalConfig holds a reference to the global configuration singleton.
+ internalConfig *internalconfig.Config
// appsecStartOptions controls the options used when starting appsec features.
appsecStartOptions []appsecconfig.StartOption
@@ -319,6 +322,9 @@ type config struct {
// ciVisibilityAgentless controls if the tracer is loaded with CI Visibility agentless mode. default false
ciVisibilityAgentless bool
+ // ciVisibilityNoopTracer controls if CI Visibility must set a wrapper to behave like a noop tracer. default false
+ ciVisibilityNoopTracer bool
+
// logDirectory is directory for tracer logs specified by user-setting DD_TRACE_LOG_DIRECTORY. default empty/unused
logDirectory string
@@ -373,6 +379,7 @@ const partialFlushMinSpansDefault = 1000
// and passed user opts.
func newConfig(opts ...StartOption) (*config, error) {
c := new(config)
+ c.internalConfig = internalconfig.Get()
// If this was built with a recent-enough version of Orchestrion, force the orchestrion config to
// the baked-in values. We do this early so that opts can be used to override the baked-in values,
@@ -475,7 +482,6 @@ func newConfig(opts ...StartOption) (*config, error) {
c.logStartup = internal.BoolEnv("DD_TRACE_STARTUP_LOGS", true)
c.runtimeMetrics = internal.BoolVal(getDDorOtelConfig("metrics"), false)
c.runtimeMetricsV2 = internal.BoolEnv("DD_RUNTIME_METRICS_V2_ENABLED", true)
- c.debug = internal.BoolVal(getDDorOtelConfig("debugMode"), false)
c.logDirectory = env.Get("DD_TRACE_LOG_DIRECTORY")
c.enabled = newDynamicConfig("tracing_enabled", internal.BoolVal(getDDorOtelConfig("enabled"), true), func(_ bool) bool { return true }, equal[bool])
if _, ok := env.Lookup("DD_TRACE_ENABLED"); ok {
@@ -551,14 +557,10 @@ func newConfig(opts ...StartOption) (*config, error) {
if c.agentURL.Scheme == "unix" {
// If we're connecting over UDS we can just rely on the agent to provide the hostname
log.Debug("connecting to agent over unix, do not set hostname on any traces")
- c.httpClient = udsClient(c.agentURL.Path, c.httpClientTimeout)
- // TODO(darccio): use internal.UnixDataSocketURL instead
- c.agentURL = &url.URL{
- Scheme: "http",
- Host: fmt.Sprintf("UDS_%s", strings.NewReplacer(":", "_", "/", "_", `\`, "_").Replace(c.agentURL.Path)),
- }
+ c.httpClient = internal.UDSClient(c.agentURL.Path, cmp.Or(c.httpClientTimeout, defaultHTTPTimeout))
+ c.agentURL = internal.UnixDataSocketURL(c.agentURL.Path)
} else {
- c.httpClient = defaultHTTPClient(c.httpClientTimeout, false)
+ c.httpClient = internal.DefaultHTTPClient(c.httpClientTimeout, false)
}
}
WithGlobalTag(ext.RuntimeID, globalconfig.RuntimeID())(c)
@@ -610,10 +612,9 @@ func newConfig(opts ...StartOption) (*config, error) {
if c.logger != nil {
log.UseLogger(c.logger)
}
- if c.debug {
+ if c.internalConfig.Debug() {
log.SetLevel(log.LevelDebug)
}
-
// Check if CI Visibility mode is enabled
if internal.BoolEnv(constants.CIVisibilityEnabledEnvironmentVariable, false) {
c.ciVisibilityEnabled = true // Enable CI Visibility mode
@@ -622,6 +623,7 @@ func newConfig(opts ...StartOption) (*config, error) {
ciTransport := newCiVisibilityTransport(c) // Create a default CI Visibility Transport
c.transport = ciTransport // Replace the default transport with the CI Visibility transport
c.ciVisibilityAgentless = ciTransport.agentless
+ c.ciVisibilityNoopTracer = internal.BoolEnv(constants.CIVisibilityUseNoopTracer, false)
}
// if using stdout or traces are disabled or we are in ci visibility agentless mode, agent is disabled
@@ -742,29 +744,6 @@ func newStatsdClient(c *config) (internal.StatsdClient, error) {
return internal.NewStatsdClient(c.dogstatsdAddr, statsTags(c))
}
-// udsClient returns a new http.Client which connects using the given UDS socket path.
-func udsClient(socketPath string, timeout time.Duration) *http.Client {
- if timeout == 0 {
- timeout = defaultHTTPTimeout
- }
- return &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
- return defaultDialer(timeout).DialContext(ctx, "unix", (&net.UnixAddr{
- Name: socketPath,
- Net: "unix",
- }).String())
- },
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- },
- Timeout: timeout,
- }
-}
-
// defaultDogstatsdAddr returns the default connection address for Dogstatsd.
func defaultDogstatsdAddr() string {
envHost, envPort := env.Get("DD_DOGSTATSD_HOST"), env.Get("DD_DOGSTATSD_PORT")
@@ -1029,8 +1008,7 @@ func WithDebugStack(enabled bool) StartOption {
// WithDebugMode enables debug mode on the tracer, resulting in more verbose logging.
func WithDebugMode(enabled bool) StartOption {
return func(c *config) {
- telemetry.RegisterAppConfig("trace_debug_enabled", enabled, telemetry.OriginCode)
- c.debug = enabled
+ c.internalConfig.SetDebug(enabled, telemetry.OriginCode)
}
}
@@ -1203,7 +1181,7 @@ func WithSampler(s Sampler) StartOption {
}
}
-// WithRateSampler sets the given sampler rate to be used with the tracer.
+// WithSamplerRate sets the given sampler rate to be used with the tracer.
// The rate must be between 0 and 1. By default an all-permissive sampler rate (1) is used.
func WithSamplerRate(rate float64) StartOption {
return func(c *config) {
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otel_dd_mappings.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otel_dd_mappings.go
index 9d4e5c5a0..89dbfb37d 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otel_dd_mappings.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otel_dd_mappings.go
@@ -93,7 +93,8 @@ var propagationMapping = map[string]string{
func getDDorOtelConfig(configName string) string {
config, ok := otelDDConfigs[configName]
if !ok {
- panic(fmt.Sprintf("Programming Error: %v not found in supported configurations", configName))
+ log.Debug("Programming Error: %s not found in supported configurations", configName)
+ return ""
}
// 1. Check managed stable config if handsOff
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otelprocesscontext.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otelprocesscontext.go
new file mode 100644
index 000000000..f690ec905
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otelprocesscontext.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+package tracer
+
+// OtelProcessContext represents the OTEL context for the process.
+//
+//go:generate go run github.com/tinylib/msgp -unexported -marshal=true -o=otelprocesscontext_msgp.go -tests=false
+type otelProcessContext struct {
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/deployment/#deployment-environment-name
+ DeploymentEnvironmentName string `msg:"deployment.environment.name"`
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/host/#host-name
+ HostName string `msg:"host.name"`
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/service/#service-instance-id
+ ServiceInstanceID string `msg:"service.instance.id"`
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/service/#service-name
+ ServiceName string `msg:"service.name"`
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/service/#service-version
+ ServiceVersion string `msg:"service.version"`
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/telemetry/#telemetry-sdk-language
+ TelemetrySDKLanguage string `msg:"telemetry.sdk.language"`
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/telemetry/#telemetry-sdk-version
+ TelemetrySDKVersion string `msg:"telemetry.sdk.version"`
+ // https://opentelemetry.io/docs/specs/semconv/registry/attributes/telemetry/#telemetry-sdk-name
+ TelemetrySdkName string `msg:"telemetry.sdk.name"`
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otelprocesscontext_msgp.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otelprocesscontext_msgp.go
new file mode 100644
index 000000000..5b568286c
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/otelprocesscontext_msgp.go
@@ -0,0 +1,285 @@
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
+
+package tracer
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *otelProcessContext) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "deployment.environment.name":
+ z.DeploymentEnvironmentName, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "DeploymentEnvironmentName")
+ return
+ }
+ case "host.name":
+ z.HostName, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "HostName")
+ return
+ }
+ case "service.instance.id":
+ z.ServiceInstanceID, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceInstanceID")
+ return
+ }
+ case "service.name":
+ z.ServiceName, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceName")
+ return
+ }
+ case "service.version":
+ z.ServiceVersion, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceVersion")
+ return
+ }
+ case "telemetry.sdk.language":
+ z.TelemetrySDKLanguage, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySDKLanguage")
+ return
+ }
+ case "telemetry.sdk.version":
+ z.TelemetrySDKVersion, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySDKVersion")
+ return
+ }
+ case "telemetry.sdk.name":
+ z.TelemetrySdkName, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySdkName")
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *otelProcessContext) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 8
+ // write "deployment.environment.name"
+ err = en.Append(0x88, 0xbb, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.DeploymentEnvironmentName)
+ if err != nil {
+ err = msgp.WrapError(err, "DeploymentEnvironmentName")
+ return
+ }
+ // write "host.name"
+ err = en.Append(0xa9, 0x68, 0x6f, 0x73, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.HostName)
+ if err != nil {
+ err = msgp.WrapError(err, "HostName")
+ return
+ }
+ // write "service.instance.id"
+ err = en.Append(0xb3, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x69, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.ServiceInstanceID)
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceInstanceID")
+ return
+ }
+ // write "service.name"
+ err = en.Append(0xac, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.ServiceName)
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceName")
+ return
+ }
+ // write "service.version"
+ err = en.Append(0xaf, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.ServiceVersion)
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceVersion")
+ return
+ }
+ // write "telemetry.sdk.language"
+ err = en.Append(0xb6, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x73, 0x64, 0x6b, 0x2e, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.TelemetrySDKLanguage)
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySDKLanguage")
+ return
+ }
+ // write "telemetry.sdk.version"
+ err = en.Append(0xb5, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x73, 0x64, 0x6b, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.TelemetrySDKVersion)
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySDKVersion")
+ return
+ }
+ // write "telemetry.sdk.name"
+ err = en.Append(0xb2, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x73, 0x64, 0x6b, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.TelemetrySdkName)
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySdkName")
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *otelProcessContext) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 8
+ // string "deployment.environment.name"
+ o = append(o, 0x88, 0xbb, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ o = msgp.AppendString(o, z.DeploymentEnvironmentName)
+ // string "host.name"
+ o = append(o, 0xa9, 0x68, 0x6f, 0x73, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ o = msgp.AppendString(o, z.HostName)
+ // string "service.instance.id"
+ o = append(o, 0xb3, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x69, 0x64)
+ o = msgp.AppendString(o, z.ServiceInstanceID)
+ // string "service.name"
+ o = append(o, 0xac, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ o = msgp.AppendString(o, z.ServiceName)
+ // string "service.version"
+ o = append(o, 0xaf, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ o = msgp.AppendString(o, z.ServiceVersion)
+ // string "telemetry.sdk.language"
+ o = append(o, 0xb6, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x73, 0x64, 0x6b, 0x2e, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65)
+ o = msgp.AppendString(o, z.TelemetrySDKLanguage)
+ // string "telemetry.sdk.version"
+ o = append(o, 0xb5, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x73, 0x64, 0x6b, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
+ o = msgp.AppendString(o, z.TelemetrySDKVersion)
+ // string "telemetry.sdk.name"
+ o = append(o, 0xb2, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x73, 0x64, 0x6b, 0x2e, 0x6e, 0x61, 0x6d, 0x65)
+ o = msgp.AppendString(o, z.TelemetrySdkName)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *otelProcessContext) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "deployment.environment.name":
+ z.DeploymentEnvironmentName, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DeploymentEnvironmentName")
+ return
+ }
+ case "host.name":
+ z.HostName, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "HostName")
+ return
+ }
+ case "service.instance.id":
+ z.ServiceInstanceID, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceInstanceID")
+ return
+ }
+ case "service.name":
+ z.ServiceName, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceName")
+ return
+ }
+ case "service.version":
+ z.ServiceVersion, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ServiceVersion")
+ return
+ }
+ case "telemetry.sdk.language":
+ z.TelemetrySDKLanguage, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySDKLanguage")
+ return
+ }
+ case "telemetry.sdk.version":
+ z.TelemetrySDKVersion, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySDKVersion")
+ return
+ }
+ case "telemetry.sdk.name":
+ z.TelemetrySdkName, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "TelemetrySdkName")
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *otelProcessContext) Msgsize() (s int) {
+ s = 1 + 28 + msgp.StringPrefixSize + len(z.DeploymentEnvironmentName) + 10 + msgp.StringPrefixSize + len(z.HostName) + 20 + msgp.StringPrefixSize + len(z.ServiceInstanceID) + 13 + msgp.StringPrefixSize + len(z.ServiceName) + 16 + msgp.StringPrefixSize + len(z.ServiceVersion) + 23 + msgp.StringPrefixSize + len(z.TelemetrySDKLanguage) + 22 + msgp.StringPrefixSize + len(z.TelemetrySDKVersion) + 19 + msgp.StringPrefixSize + len(z.TelemetrySdkName)
+ return
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/payload_v1.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/payload_v1.go
index 06decad29..8659b9705 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/payload_v1.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/payload_v1.go
@@ -160,6 +160,7 @@ func (p *payloadV1) push(t spanList) (stats payloadStats, err error) {
p.chunks = append(p.chunks, tc)
p.recordItem()
+ p.update()
return p.stats(), err
}
@@ -268,9 +269,9 @@ func (p *payloadV1) Write(b []byte) (int, error) {
// Read implements io.Reader. It reads from the msgpack-encoded stream.
func (p *payloadV1) Read(b []byte) (n int, err error) {
+ // Ensure header and buffer are initialized (handles empty payload case)
if len(p.header) == 0 {
- p.header = make([]byte, 8)
- p.updateHeader()
+ p.update()
}
if p.readOff < len(p.header) {
// reading header
@@ -278,15 +279,22 @@ func (p *payloadV1) Read(b []byte) (n int, err error) {
p.readOff += n
return n, nil
}
- if len(p.buf) == 0 {
- p.encode()
- }
if p.reader == nil {
p.reader = bytes.NewReader(p.buf)
}
return p.reader.Read(b)
}
+func (p *payloadV1) update() {
+ if len(p.header) == 0 {
+ p.header = make([]byte, 8)
+ }
+ p.updateHeader()
+ // Reset the buffer length to 0 before re-encoding
+ p.buf = p.buf[:0]
+ p.encode()
+}
+
// encode writes existing payload fields into the buffer in msgp format.
func (p *payloadV1) encode() {
st := newStringTable()
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span.go
index 3c6206c6d..2481a5af8 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/span.go
@@ -13,10 +13,8 @@ import (
"encoding/json"
"fmt"
"reflect"
- "runtime"
"runtime/pprof"
rt "runtime/trace"
- "strconv"
"strings"
"sync"
"time"
@@ -30,6 +28,7 @@ import (
"github.com/DataDog/dd-trace-go/v2/internal/log"
"github.com/DataDog/dd-trace-go/v2/internal/orchestrion"
"github.com/DataDog/dd-trace-go/v2/internal/samplernames"
+ "github.com/DataDog/dd-trace-go/v2/internal/stacktrace"
"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
"github.com/DataDog/dd-trace-go/v2/internal/traceprof"
@@ -485,46 +484,23 @@ func (s *Span) setTagError(value interface{}, cfg errorConfig) {
}
}
-// defaultStackLength specifies the default maximum size of a stack trace.
-const defaultStackLength = 32
-
// takeStacktrace takes a stack trace of maximum n entries, skipping the first skip entries.
-// If n is 0, up to 20 entries are retrieved.
-func takeStacktrace(n, skip uint) string {
+// If depth is 0, the default depth from internal/stacktrace is used.
+// Uses the centralized internal/stacktrace implementation while preserving telemetry tracking.
+func takeStacktrace(depth uint, skip uint) string {
telemetry.Count(telemetry.NamespaceTracers, "errorstack.source", []string{"source:takeStacktrace"}).Submit(1)
now := time.Now()
defer func() {
dur := float64(time.Since(now))
telemetry.Distribution(telemetry.NamespaceTracers, "errorstack.duration", []string{"source:takeStacktrace"}).Submit(dur)
}()
- if n == 0 {
- n = defaultStackLength
- }
- var builder strings.Builder
- pcs := make([]uintptr, n)
- // +2 to exclude runtime.Callers and takeStacktrace
- numFrames := runtime.Callers(2+int(skip), pcs)
- if numFrames == 0 {
- return ""
- }
- frames := runtime.CallersFrames(pcs[:numFrames])
- for i := 0; ; i++ {
- frame, more := frames.Next()
- if i != 0 {
- builder.WriteByte('\n')
- }
- builder.WriteString(frame.Function)
- builder.WriteByte('\n')
- builder.WriteByte('\t')
- builder.WriteString(frame.File)
- builder.WriteByte(':')
- builder.WriteString(strconv.Itoa(frame.Line))
- if !more {
- break
- }
- }
- return builder.String()
+ // Capturing the full stack is necessary for span error stacktraces, where we want complete visibility.
+ // Skip +4: The old implementation used runtime.Callers(2+skip, ...) which skipped runtime.Callers
+ // and takeStacktrace. The internal/stacktrace package auto-filters its own frames, but we still
+ // need to account for: runtime.Callers(1) + takeStacktrace(1) + setTagError(1) + additional frame(1)
+ stack := stacktrace.SkipAndCaptureWithInternalFrames(int(depth), int(skip)+4)
+ return stacktrace.Format(stack)
}
// setMeta sets a string tag. This method is not safe for concurrent use.
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spancontext.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spancontext.go
index aeee388b1..954fef911 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spancontext.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/spancontext.go
@@ -621,10 +621,18 @@ func (t *trace) finishedOne(s *Span) {
}
telemetry.Distribution(telemetry.NamespaceTracers, "trace_partial_flush.spans_closed", nil).Submit(float64(len(finishedSpans)))
telemetry.Distribution(telemetry.NamespaceTracers, "trace_partial_flush.spans_remaining", nil).Submit(float64(len(leftoverSpans)))
- finishedSpans[0].setMetric(keySamplingPriority, *t.priority)
+ // #incident-46344 -- if we set metrics and tags on a different span than what was passed into this function,
+ // we need to lock this new span.
+ fSpan := finishedSpans[0]
+ currentSpanIsFirstInChunk := s == fSpan
+ if !currentSpanIsFirstInChunk {
+ fSpan.mu.Lock()
+ defer fSpan.mu.Unlock()
+ }
+ fSpan.setMetric(keySamplingPriority, *t.priority)
if s != t.spans[0] {
// Make sure the first span in the chunk has the trace-level tags
- t.setTraceTags(finishedSpans[0])
+ t.setTraceTags(fSpan)
}
if tr, ok := tr.(*tracer); ok {
t.finishChunk(tr, &chunk{
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer.go
index db1bf7a45..33b054183 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/tracer.go
@@ -219,7 +219,11 @@ func Start(opts ...StartOption) error {
t.Stop()
return nil
}
- setGlobalTracer(t)
+ if t.config.ciVisibilityEnabled && t.config.ciVisibilityNoopTracer {
+ setGlobalTracer(wrapWithCiVisibilityNoopTracer(t))
+ } else {
+ setGlobalTracer(t)
+ }
if t.dataStreams != nil {
t.dataStreams.Start()
}
@@ -234,6 +238,16 @@ func Start(opts ...StartOption) error {
return nil
}
+ if t.config.runtimeMetricsV2 {
+ l := slog.New(slogHandler{})
+ opts := &runtimemetrics.Options{Logger: l}
+ if t.runtimeMetrics, err = runtimemetrics.NewEmitter(t.statsd, opts); err == nil {
+ l.Debug("Runtime metrics v2 enabled.")
+ } else {
+ l.Error("Failed to enable runtime metrics v2", "err", err.Error())
+ }
+ }
+
// Start AppSec with remote configuration
cfg := remoteconfig.DefaultClientConfig()
cfg.AgentURL = t.config.agentURL.String()
@@ -267,15 +281,20 @@ func Start(opts ...StartOption) error {
// DD_INSTRUMENTATION_TELEMETRY_ENABLED env var
t.telemetry = startTelemetry(t.config)
- // store the configuration in an in-memory file, allowing it to be read to
- // determine if the process is instrumented with a tracer and to retrive
- // relevant tracing information.
+ // store the configuration in an in-memory file and in a named anonymous mapping,
+ // allowing it to be read to determine if the process is instrumented with a tracer
+ // and to retrieve relevant tracing information.
storeConfig(t.config)
globalinternal.SetTracerInitialized(true)
return nil
}
+// storeConfig stores the process level tracing context both in an in-memory file and
+// in a named anonymous mapping.
+// This allows an external process, such as the Datadog Agent or fullhost profiler,
+// to determine if the process is instrumented with a tracer and to retrieve the process
+// level tracing context.
func storeConfig(c *config) {
uuid, _ := uuid.NewRandom()
name := fmt.Sprintf("datadog-tracer-info-%s", uuid.String()[0:8])
@@ -298,6 +317,23 @@ func storeConfig(c *config) {
if err != nil {
log.Error("failed to store the configuration: %s", err.Error())
}
+
+ processContext := otelProcessContext{
+ DeploymentEnvironmentName: c.env,
+ HostName: c.hostname,
+ ServiceInstanceID: globalconfig.RuntimeID(),
+ ServiceName: c.serviceName,
+ ServiceVersion: c.version,
+ TelemetrySDKLanguage: "go",
+ TelemetrySDKVersion: version.Tag,
+ TelemetrySdkName: "dd-trace-go",
+ }
+
+ data, _ = processContext.MarshalMsg(nil)
+ err = globalinternal.CreateOtelProcessContextMapping(data)
+ if err != nil {
+ log.Error("failed to store the OTEL process context: %s", err.Error())
+ }
}
// Stop stops the started tracer. Subsequent calls are valid but become no-op.
@@ -454,15 +490,6 @@ func newTracer(opts ...StartOption) (*tracer, error) {
t.reportRuntimeMetrics(defaultMetricsReportInterval)
}()
}
- if c.runtimeMetricsV2 {
- l := slog.New(slogHandler{})
- opts := &runtimemetrics.Options{Logger: l}
- if t.runtimeMetrics, err = runtimemetrics.NewEmitter(t.statsd, opts); err == nil {
- l.Debug("Runtime metrics v2 enabled.")
- } else {
- l.Error("Failed to enable runtime metrics v2", "err", err.Error())
- }
- }
if c.debugAbandonedSpans {
log.Info("Abandoned spans logs enabled.")
t.abandonedSpansDebugger = newAbandonedSpansDebugger()
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/transport.go b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/transport.go
index 17e92f4b6..8c58eb86d 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/transport.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/ddtrace/tracer/transport.go
@@ -9,7 +9,6 @@ import (
"bytes"
"fmt"
"io"
- "net"
"net/http"
"runtime"
"strconv"
@@ -31,32 +30,6 @@ const (
headerComputedTopLevel = "Datadog-Client-Computed-Top-Level"
)
-func defaultDialer(timeout time.Duration) *net.Dialer {
- return &net.Dialer{
- Timeout: timeout,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }
-}
-
-func defaultHTTPClient(timeout time.Duration, disableKeepAlives bool) *http.Client {
- if timeout == 0 {
- timeout = defaultHTTPTimeout
- }
- return &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: defaultDialer(timeout).DialContext,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- DisableKeepAlives: disableKeepAlives,
- },
- Timeout: timeout,
- }
-}
-
const (
defaultHostname = "localhost"
defaultPort = "8126"
@@ -171,13 +144,13 @@ func (t *httpTransport) send(p payload) (body io.ReadCloser, err error) {
req.Header.Set(header, value)
}
req.Header.Set(traceCountHeader, strconv.Itoa(stats.itemCount))
- req.Header.Set(headerComputedTopLevel, "yes")
+ req.Header.Set(headerComputedTopLevel, "t")
if t := getGlobalTracer(); t != nil {
tc := t.TracerConf()
if tc.TracingAsTransport || tc.CanComputeStats {
// tracingAsTransport uses this header to disable the trace agent's stats computation
// while making canComputeStats() always false to also disable client stats computation.
- req.Header.Set("Datadog-Client-Computed-Stats", "yes")
+ req.Header.Set("Datadog-Client-Computed-Stats", "t")
}
droppedTraces := int(tracerstats.Count(tracerstats.AgentDroppedP0Traces))
partialTraces := int(tracerstats.Count(tracerstats.PartialTraces))
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/http.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/http.go
index 0f3e4fde1..9c05f7397 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/http.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/http.go
@@ -12,6 +12,8 @@ package httpsec
import (
"context"
+ "sync"
+
// Blank import needed to use embed for the default blocked response payloads
_ "embed"
"net/http"
@@ -26,6 +28,7 @@ import (
"github.com/DataDog/dd-trace-go/v2/internal/log"
"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
telemetrylog "github.com/DataDog/dd-trace-go/v2/internal/telemetry/log"
+ "github.com/DataDog/go-libddwaf/v4"
)
// HandlerOperation type representing an HTTP operation. It must be created with
@@ -47,6 +50,17 @@ type (
// downstreamRequestBodyAnalysis is the number of times a call to a downstream request body monitoring function was made.
downstreamRequestBodyAnalysis atomic.Int32
+
+ // downstreamRequestOverrides holds behavioral overrides for future downstream requests, related
+ // to a redirect chain.
+ downstreamRequestOverrides map[string]DownstreamRequestOverride
+ downstreamRequestOverridesMu sync.Mutex
+ }
+
+ DownstreamRequestOverride struct {
+ DownstreamURL string
+ AnalyzeBody bool
+ OriginalRequestBody libddwaf.Encodable
}
// HandlerOperationArgs is the HTTP handler operation arguments.
@@ -97,6 +111,16 @@ func StartOperation(ctx context.Context, args HandlerOperationArgs, span trace.T
action.Store(a)
})
+ dyngo.OnData(op, func(evt DownstreamRequestOverride) {
+ op.downstreamRequestOverridesMu.Lock()
+ defer op.downstreamRequestOverridesMu.Unlock()
+
+ if op.downstreamRequestOverrides == nil {
+ op.downstreamRequestOverrides = make(map[string]DownstreamRequestOverride, 1)
+ }
+ op.downstreamRequestOverrides[evt.DownstreamURL] = evt
+ })
+
return op, &action, dyngo.StartAndRegisterOperation(ctx, op, args)
}
@@ -120,6 +144,25 @@ func (op *HandlerOperation) DownstreamRequestBodyAnalysis() int {
return int(op.downstreamRequestBodyAnalysis.Load())
}
+// HasDownstreamRequestOverride checks if a downstream request override exists for the given URL,
+// meaning it is part of a redirect chain.
+func (op *HandlerOperation) HasDownstreamRequestOverride(url string) bool {
+ op.downstreamRequestOverridesMu.Lock()
+ defer op.downstreamRequestOverridesMu.Unlock()
+ _, ok := op.downstreamRequestOverrides[url]
+ return ok
+}
+
+// ConsumeDownstreamRequestOverride consumes and removes a downstream request override for the given
+// URL, returning the override data.
+func (op *HandlerOperation) ConsumeDownstreamRequestOverride(url string) (DownstreamRequestOverride, bool) {
+ op.downstreamRequestOverridesMu.Lock()
+ defer op.downstreamRequestOverridesMu.Unlock()
+ override, ok := op.downstreamRequestOverrides[url]
+ delete(op.downstreamRequestOverrides, url)
+ return override, ok
+}
+
// IncrementDownstreamRequestBodyAnalysis increments the number of times a call to a downstream request body monitoring function was made.
func (op *HandlerOperation) IncrementDownstreamRequestBodyAnalysis() {
op.downstreamRequestBodyAnalysis.Add(1)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/roundtripper.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/roundtripper.go
index a04908d01..3f8374bee 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/roundtripper.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/httpsec/roundtripper.go
@@ -14,6 +14,7 @@ import (
"github.com/DataDog/dd-trace-go/v2/appsec/events"
"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
"github.com/DataDog/dd-trace-go/v2/internal/log"
+ "github.com/DataDog/go-libddwaf/v4"
)
var badInputContextOnce sync.Once
@@ -23,7 +24,9 @@ type (
dyngo.Operation
HandlerOp *HandlerOperation
+ url string
analyseBody bool
+ requestbody libddwaf.Encodable
}
// RoundTripOperationArgs is the round trip operation arguments.
@@ -51,6 +54,14 @@ func (r *RoundTripOperation) AnalyseBody() bool {
return r.analyseBody
}
+func (r *RoundTripOperation) SetRequestBody(body libddwaf.Encodable) {
+ r.requestbody = body
+}
+
+func (r *RoundTripOperation) RequestBody() libddwaf.Encodable {
+ return r.requestbody
+}
+
func (RoundTripOperationArgs) IsArgOf(*RoundTripOperation) {}
func (RoundTripOperationRes) IsResultOf(*RoundTripOperation) {}
@@ -80,6 +91,7 @@ func ProtectRoundTrip(ctx context.Context, req *http.Request) (func(*http.Respon
op := &RoundTripOperation{
Operation: dyngo.NewOperation(handlerOp),
HandlerOp: handlerOp,
+ url: req.URL.String(),
}
var err *events.BlockingSecurityEvent
@@ -107,3 +119,7 @@ func ProtectRoundTrip(ctx context.Context, req *http.Request) (func(*http.Respon
dyngo.FinishOperation(op, resArgs)
}, nil
}
+
+func (r *RoundTripOperation) URL() string {
+ return r.url
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/block.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/block.go
index 43ad2714c..ba5017385 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/block.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/block.go
@@ -6,12 +6,12 @@
package actions
import (
+ "bytes"
_ "embed" // embed is used to embed the blocked-template.json and blocked-template.html files
"net/http"
"os"
"strings"
-
- "github.com/go-viper/mapstructure/v2"
+ "unsafe"
"github.com/DataDog/dd-trace-go/v2/appsec/events"
"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
@@ -30,8 +30,9 @@ var blockedTemplateJSON []byte
var blockedTemplateHTML []byte
const (
- envBlockedTemplateHTML = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_HTML"
- envBlockedTemplateJSON = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_JSON"
+ envBlockedTemplateHTML = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_HTML"
+ envBlockedTemplateJSON = "DD_APPSEC_HTTP_BLOCKED_TEMPLATE_JSON"
+ securityResponsePlaceholder = "[security_response_id]"
)
func init() {
@@ -52,11 +53,12 @@ type (
// blockActionParams are the dynamic parameters to be provided to a "block_request"
// action type upon invocation
blockActionParams struct {
- // GRPCStatusCode is the gRPC status code to be returned. Since 0 is the OK status, the value is nullable to
- // be able to distinguish between unset and defaulting to Abort (10), or set to OK (0).
- GRPCStatusCode *int `mapstructure:"grpc_status_code,omitempty"`
- StatusCode int `mapstructure:"status_code"`
- Type string `mapstructure:"type,omitempty"`
+ // GRPCStatusCode is the gRPC status code to be returned. Since 0 is the OK status, the value defaults to Abort (10)
+ // if not set to OK (0).
+ GRPCStatusCode int
+ StatusCode int
+ Type string
+ SecurityResponseID string
}
// GRPCWrapper is an opaque prototype abstraction for a gRPC handler (to avoid importing grpc)
// that returns a status code and an error
@@ -73,6 +75,40 @@ type (
}
)
+func (b *blockActionParams) Decode(p map[string]any) error {
+ for k := range p {
+ switch k {
+ case "grpc_status_code":
+ v, err := decodeInt(p, k)
+ if err != nil {
+ return err
+ }
+ b.GRPCStatusCode = v
+ case "status_code":
+ v, err := decodeInt(p, k)
+ if err != nil {
+ return err
+ }
+ b.StatusCode = v
+ case "security_response_id":
+ v, err := decodeStr(p, k)
+ if err != nil {
+ return err
+ }
+ b.SecurityResponseID = v
+ case "type":
+ v, err := decodeStr(p, k)
+ if err != nil {
+ return err
+ }
+ b.Type = v
+ default:
+ // We ignore any other field.
+ }
+ }
+ return nil
+}
+
func (a *BlockGRPC) EmitData(op dyngo.Operation) {
dyngo.EmitData(op, a)
dyngo.EmitData(op, &events.BlockingSecurityEvent{})
@@ -94,21 +130,14 @@ func newGRPCBlockHandler(status int) GRPCWrapper {
}
func blockParamsFromMap(params map[string]any) (blockActionParams, error) {
- grpcCode := 10
p := blockActionParams{
Type: "auto",
StatusCode: 403,
- GRPCStatusCode: &grpcCode,
+ GRPCStatusCode: 10,
}
-
- if err := mapstructure.WeakDecode(params, &p); err != nil {
+ if err := p.Decode(params); err != nil {
return p, err
}
-
- if p.GRPCStatusCode == nil {
- p.GRPCStatusCode = &grpcCode
- }
-
return p, nil
}
@@ -120,19 +149,19 @@ func NewBlockAction(params map[string]any) []Action {
return nil
}
return []Action{
- newHTTPBlockRequestAction(p.StatusCode, p.Type),
- newGRPCBlockRequestAction(*p.GRPCStatusCode),
+ newHTTPBlockRequestAction(p.StatusCode, p.Type, p.SecurityResponseID),
+ newGRPCBlockRequestAction(p.GRPCStatusCode),
}
}
-func newHTTPBlockRequestAction(status int, template string) *BlockHTTP {
- return &BlockHTTP{Handler: newBlockHandler(status, template)}
+func newHTTPBlockRequestAction(status int, template string, securityResponseID string) *BlockHTTP {
+ return &BlockHTTP{Handler: newBlockHandler(status, template, securityResponseID)}
}
// newBlockHandler creates, initializes and returns a new BlockRequestAction
-func newBlockHandler(status int, template string) http.Handler {
- htmlHandler := newBlockRequestHandler(status, "text/html", blockedTemplateHTML)
- jsonHandler := newBlockRequestHandler(status, "application/json", blockedTemplateJSON)
+func newBlockHandler(status int, template string, securityResponseID string) http.Handler {
+ htmlHandler := newBlockRequestHandler(status, "text/html", blockedTemplateHTML, securityResponseID)
+ jsonHandler := newBlockRequestHandler(status, "application/json", blockedTemplateJSON, securityResponseID)
switch template {
case "json":
return jsonHandler
@@ -153,10 +182,16 @@ func newBlockHandler(status int, template string) http.Handler {
}
}
-func newBlockRequestHandler(status int, ct string, payload []byte) http.Handler {
+func newBlockRequestHandler(status int, ct string, payload []byte, securityResponseID string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", ct)
w.WriteHeader(status)
- w.Write(payload)
+ w.Write(renderSecurityResponsePayload(payload, securityResponseID))
})
}
+
+func renderSecurityResponsePayload(payload []byte, securityResponseID string) []byte {
+ securityResponseBytes := []byte(securityResponseID)
+ placeholderBytes := unsafe.Slice(unsafe.StringData(securityResponsePlaceholder), len(securityResponsePlaceholder))
+ return bytes.ReplaceAll(payload, placeholderBytes, securityResponseBytes)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.html b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.html
index b43edd96d..0d638df75 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.html
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.html
@@ -1 +1 @@
-
You've been blockedSorry, you cannot access this page. Please contact the customer service team.
\ No newline at end of file
+You've been blockedSorry, you cannot access this page. Please contact the customer service team.
Security Response ID: [security_response_id]
\ No newline at end of file
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.json b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.json
index 12ae29696..21613c675 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.json
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/blocked-template.json
@@ -1 +1 @@
-{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}]}
\ No newline at end of file
+{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}],"security_response_id":"[security_response_id]"}
\ No newline at end of file
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/decoder.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/decoder.go
new file mode 100644
index 000000000..c26e1a0f5
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/decoder.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package actions
+
+import "fmt"
+
+const errDecodingFmt = "decoding failed for %q"
+
+func decodeInt(p map[string]any, k string) (int, error) {
+ v, ok := p[k].(uint64)
+ if !ok {
+ return 0, fmt.Errorf(errDecodingFmt, k)
+ }
+ return int(v), nil
+}
+
+func decodeStr(p map[string]any, k string) (string, error) {
+ v, ok := p[k].(string)
+ if !ok {
+ return "", fmt.Errorf(errDecodingFmt, k)
+ }
+ return v, nil
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/http_redirect.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/http_redirect.go
index 562bc31ad..d94b78722 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/http_redirect.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/actions/http_redirect.go
@@ -7,8 +7,7 @@ package actions
import (
"net/http"
-
- "github.com/go-viper/mapstructure/v2"
+ "strings"
"github.com/DataDog/dd-trace-go/v2/internal/log"
)
@@ -16,8 +15,35 @@ import (
// redirectActionParams are the dynamic parameters to be provided to a "redirect_request"
// action type upon invocation
type redirectActionParams struct {
- Location string `mapstructure:"location,omitempty"`
- StatusCode int `mapstructure:"status_code"`
+ Location string
+ StatusCode int
+ SecurityResponseID string
+}
+
+func (r *redirectActionParams) Decode(p map[string]any) error {
+ for k := range p {
+ switch k {
+ case "location":
+ v, err := decodeStr(p, k)
+ if err != nil {
+ return err
+ }
+ r.Location = v
+ case "status_code":
+ v, err := decodeInt(p, k)
+ if err != nil {
+ return err
+ }
+ r.StatusCode = v
+ case "security_response_id":
+ v, err := decodeStr(p, k)
+ if err != nil {
+ return err
+ }
+ r.SecurityResponseID = v
+ }
+ }
+ return nil
}
func init() {
@@ -26,11 +52,11 @@ func init() {
func redirectParamsFromMap(params map[string]any) (redirectActionParams, error) {
var p redirectActionParams
- err := mapstructure.WeakDecode(params, &p)
+ err := p.Decode(params)
return p, err
}
-func newRedirectRequestAction(status int, loc string) *BlockHTTP {
+func newRedirectRequestAction(status int, loc string, securityResponseID string) *BlockHTTP {
// Default to 303 if status is out of redirection codes bounds
if status < http.StatusMultipleChoices || status >= http.StatusBadRequest {
status = http.StatusSeeOther
@@ -38,8 +64,9 @@ func newRedirectRequestAction(status int, loc string) *BlockHTTP {
// If location is not set we fall back on a default block action
if loc == "" {
- return &BlockHTTP{Handler: newBlockHandler(http.StatusForbidden, string(blockedTemplateJSON))}
+ return &BlockHTTP{Handler: newBlockHandler(http.StatusForbidden, "auto", securityResponseID)}
}
+ loc = strings.ReplaceAll(loc, securityResponsePlaceholder, securityResponseID)
return &BlockHTTP{Handler: http.RedirectHandler(loc, status)}
}
@@ -50,5 +77,5 @@ func NewRedirectAction(params map[string]any) []Action {
log.Debug("appsec: couldn't decode redirect action parameters")
return nil
}
- return []Action{newRedirectRequestAction(p.StatusCode, p.Location)}
+ return []Action{newRedirectRequestAction(p.StatusCode, p.Location, p.SecurityResponseID)}
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/builder.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/builder.go
index 14bbe6535..74f1e8bcd 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/builder.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/emitter/waf/addresses/builder.go
@@ -167,7 +167,6 @@ func (b *RunAddressDataBuilder) WithDownwardMethod(method string) *RunAddressDat
return b
}
b.Ephemeral[ServerIONetRequestMethodAddr] = method
- b.TimerKey = RASPScope
return b
}
@@ -176,7 +175,6 @@ func (b *RunAddressDataBuilder) WithDownwardRequestHeaders(headers map[string][]
return b
}
b.Ephemeral[ServerIONetRequestHeadersAddr] = headers
- b.TimerKey = RASPScope
return b
}
@@ -211,7 +209,6 @@ func (b *RunAddressDataBuilder) WithDownwardResponseHeaders(headers map[string][
return b
}
b.Ephemeral[ServerIONetResponseHeadersAddr] = headers
- b.TimerKey = RASPScope
return b
}
@@ -220,7 +217,6 @@ func (b *RunAddressDataBuilder) WithDownwardResponseBody(body any) *RunAddressDa
return b
}
b.Ephemeral[ServerIONetResponseBodyAddr] = body
- b.TimerKey = RASPScope
return b
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace/errortrace.go b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace/errortrace.go
index 37bf9cc59..dea06c31f 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace/errortrace.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/instrumentation/errortrace/errortrace.go
@@ -6,27 +6,22 @@
package errortrace
import (
- "bytes"
"errors"
"fmt"
- "runtime"
- "strconv"
"strings"
"time"
+ "github.com/DataDog/dd-trace-go/v2/internal/stacktrace"
"github.com/DataDog/dd-trace-go/v2/internal/telemetry"
)
// TracerError is an error type that holds stackframes from when the error was thrown.
// It can be used interchangeably with the built-in Go error type.
type TracerError struct {
- stackFrames *runtime.Frames
- inner error
- stack *bytes.Buffer
-}
+ rawStack stacktrace.RawStackTrace
-// defaultStackLength specifies the default maximum size of a stack trace.
-const defaultStackLength = 32
+ inner error
+}
func (err *TracerError) Error() string {
return err.inner.Error()
@@ -39,13 +34,13 @@ func New(text string) *TracerError {
// Wrap takes in an error and records the stack trace at the moment that it was thrown.
func Wrap(err error) *TracerError {
- return WrapN(err, 0, 1)
+ return WrapN(err, 1)
}
// WrapN takes in an error and records the stack trace at the moment that it was thrown.
-// It will capture a maximum of `n` entries, skipping the first `skip` entries.
-// If n is 0, it will capture up to 32 entries instead.
-func WrapN(err error, n uint, skip uint) *TracerError {
+// The capture depth is the internal/stacktrace default; the skip parameter
+// specifies how many stack frames to skip before capturing.
+func WrapN(err error, skip uint) *TracerError {
if err == nil {
return nil
}
@@ -53,9 +48,6 @@ func WrapN(err error, n uint, skip uint) *TracerError {
if errors.As(err, &e) {
return e
}
- if n <= 0 {
- n = defaultStackLength
- }
telemetry.Count(telemetry.NamespaceTracers, "errorstack.source", []string{"source:TracerError"}).Submit(1)
now := time.Now()
@@ -64,53 +56,24 @@ func WrapN(err error, n uint, skip uint) *TracerError {
telemetry.Distribution(telemetry.NamespaceTracers, "errorstack.duration", []string{"source:TracerError"}).Submit(dur)
}()
- pcs := make([]uintptr, n)
- var stackFrames *runtime.Frames
- // +2 to exclude runtime.Callers and Wrap
- numFrames := runtime.Callers(2+int(skip), pcs)
- if numFrames == 0 {
- stackFrames = nil
- } else {
- stackFrames = runtime.CallersFrames(pcs[:numFrames])
- }
+	// Use CaptureRaw to capture all frames, including internal DD frames.
+	// +2 to account for runtime.Callers and this WrapN function.
+ stack := stacktrace.CaptureRaw(int(skip) + 2)
tracerErr := &TracerError{
- stackFrames: stackFrames,
- inner: err,
+ rawStack: stack,
+ inner: err,
}
return tracerErr
}
// Format returns a string representation of the stack trace.
+// Uses the centralized internal/stacktrace formatting.
func (err *TracerError) Format() string {
- if err == nil || err.stackFrames == nil {
+ if err == nil {
return ""
}
- if err.stack != nil {
- return err.stack.String()
- }
-
- out := bytes.Buffer{}
- for i := 0; ; i++ {
- frame, more := err.stackFrames.Next()
- if i != 0 {
- out.WriteByte('\n')
- }
- out.WriteString(frame.Function)
- out.WriteByte('\n')
- out.WriteByte('\t')
- out.WriteString(frame.File)
- out.WriteByte(':')
- out.WriteString(strconv.Itoa(frame.Line))
- if !more {
- break
- }
- }
- // CallersFrames returns an iterator that is consumed as we read it. In order to
- // allow calling Format() multiple times, we save the result into err.stack, which can be
- // returned in future calls
- err.stack = &out
- return out.String()
+ return stacktrace.Format(err.rawStack.Symbolicate())
}
// Errorf serves the same purpose as fmt.Errorf, but returns a TracerError
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/agent.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/agent.go
index d80e83293..0f4eef771 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/agent.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/agent.go
@@ -7,11 +7,21 @@ package internal
import (
"net"
+ "net/http"
"net/url"
"os"
+ "time"
"github.com/DataDog/dd-trace-go/v2/internal/env"
"github.com/DataDog/dd-trace-go/v2/internal/log"
+
+ // OTel did a breaking change to the module go.opentelemetry.io/collector/pdata which is imported by the agent
+ // and go.opentelemetry.io/collector/pdata/pprofile depends on it and is breaking because of it
+	// For some reason the dependency closure won't let us upgrade this module past the point where it no longer breaks
+ // So we are forced to add a blank import of this module to give us back the control over its version
+ //
+ // TODO: remove this once github.com/datadog-agent/pkg/trace has upgraded both modules past the breaking change
+ _ "go.opentelemetry.io/collector/pdata/pprofile"
)
const (
@@ -74,3 +84,26 @@ func AgentURLFromEnv() *url.URL {
}
return httpURL
}
+
+func DefaultDialer(timeout time.Duration) *net.Dialer {
+ return &net.Dialer{
+ Timeout: timeout,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+}
+
+func DefaultHTTPClient(timeout time.Duration, disableKeepAlives bool) *http.Client {
+ return &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: DefaultDialer(timeout).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ DisableKeepAlives: disableKeepAlives,
+ },
+ Timeout: timeout,
+ }
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/body/json/jsoniter.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/body/json/jsoniter.go
index 9c1c38f7d..ce3fdd99e 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/body/json/jsoniter.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/body/json/jsoniter.go
@@ -6,6 +6,7 @@
package json
import (
+ "bytes"
"encoding/json"
"errors"
"fmt"
@@ -23,6 +24,10 @@ type jsonIterEncodable struct {
}
func newJSONIterEncodableFromData(data []byte, truncated bool) libddwaf.Encodable {
+ // Leading and trailing whitespace carries no semantic value in JSON, so we
+ // trim it in order to avoid having to worry about those when doing
+ // parsing completeness assertions.
+ data = bytes.TrimSpace(data)
return &jsonIterEncodable{
truncated: truncated,
data: data,
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/metrics.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/metrics.go
index f4658347f..61dea4073 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/metrics.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/emitter/waf/metrics.go
@@ -173,6 +173,9 @@ func (m *HandleMetrics) NewContextMetrics() *ContextMetrics {
type ContextMetrics struct {
*HandleMetrics
+ // SumDownstreamRequestsCalls is the sum of all the downstream requests calls analyzed by the WAF.
+ SumDownstreamRequestsCalls atomic.Uint32
+
// SumRASPCalls is the sum of all the RASP calls made by the WAF whatever the rasp rule type it is.
SumRASPCalls atomic.Uint32
// SumWAFErrors is the sum of all the WAF errors that happened not in the RASP scope.
@@ -348,6 +351,8 @@ func (m *ContextMetrics) IncWafError(addrs libddwaf.RunAddressData, in error) {
if !errors.Is(in, waferrors.ErrTimeout) {
logger := m.logger.With(telemetry.WithTags(m.baseTags))
+		// This error's origin is known all the way to the tip of the error chain, and since it impacts
+		// WAF behavior we really want to log it so we can investigate it; hence we don't wrap it in a safe error
logger.Error("unexpected WAF error", slog.Any("error", telemetrylog.NewSafeError(in)))
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/roundtripper.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/roundtripper.go
index c6f4af9d0..5ac88a035 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/roundtripper.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/httpsec/roundtripper.go
@@ -8,6 +8,7 @@ package httpsec
import (
"log/slog"
"net/http"
+ "net/url"
"sync/atomic"
"github.com/DataDog/dd-trace-go/v2/instrumentation/appsec/dyngo"
@@ -72,22 +73,29 @@ func (feature *DownwardRequestFeature) OnStart(op *httpsec.RoundTripOperation, a
WithDownwardMethod(args.Method).
WithDownwardRequestHeaders(args.Headers)
+ // Increment the span metric for downward requests
+ op.HandlerOp.ContextOperation.GetMetricsInstance().SumDownstreamRequestsCalls.Add(1)
+
+ // Increment the internal sampling counter for downward requests
requestCount := feature.downstreamRequestAnalysis.Add(1)
+ hasDownstreamOverride := op.HandlerOp.HasDownstreamRequestOverride(op.URL())
+
// Sampling algorithm based on:
// https://docs.google.com/document/d/1DIGuCl1rkhx5swmGxKO7Je8Y4zvaobXBlgbm6C89yzU/edit?tab=t.0#heading=h.qawhep7pps5a
- if op.HandlerOp.DownstreamRequestBodyAnalysis() < feature.maxDownstreamRequestBodyAnalysis &&
+ if !hasDownstreamOverride && op.HandlerOp.DownstreamRequestBodyAnalysis() < feature.maxDownstreamRequestBodyAnalysis &&
requestCount*knuthFactor <= uint64(feature.analysisSampleRate*maxUint64) {
op.HandlerOp.IncrementDownstreamRequestBodyAnalysis()
op.SetAnalyseBody()
}
- if args.Body != nil && *args.Body != nil && *args.Body != http.NoBody && op.AnalyseBody() {
+ if op.AnalyseBody() && args.Body != nil && *args.Body != nil && *args.Body != http.NoBody {
encodable, err := body.NewEncodable(http.Header(args.Headers).Get("Content-Type"), args.Body, maxBodyParseSize)
if err != nil {
log.Debug("Unsupported response body content type or error reading body: %s", err.Error())
telemetrylog.Warn("Unsupported request body content type or error reading body", slog.Any("error", telemetrylog.NewSafeError(err)))
}
+ op.SetRequestBody(encodable)
builder = builder.WithDownwardRequestBody(encodable)
}
@@ -99,7 +107,49 @@ func (feature *DownwardRequestFeature) OnFinish(op *httpsec.RoundTripOperation,
WithDownwardResponseStatus(args.StatusCode).
WithDownwardResponseHeaders(headersToLower(args.Headers))
- if args.Body != nil && *args.Body != nil && *args.Body != http.NoBody && op.AnalyseBody() {
+ location := http.Header(args.Headers).Get("Location")
+ isRedirect := args.StatusCode >= 300 && args.StatusCode <= 399 && location != ""
+
+ var (
+ analyzeBody bool
+ requestBody = op.RequestBody()
+ resubmitRequestBody = false
+ )
+ if override, found := op.HandlerOp.ConsumeDownstreamRequestOverride(op.URL()); found {
+ // We are in a downstream request identified as part of a redirect chain. We use the original
+ // sampling decision instead of making a new one.
+ analyzeBody = override.AnalyzeBody
+ requestBody = override.OriginalRequestBody
+		// If we're at the end of a redirect chain, we re-submit the request body to assess data leakage
+		// to untrusted authorities.
+ resubmitRequestBody = true
+ } else {
+ analyzeBody = op.AnalyseBody()
+ }
+
+ if isRedirect {
+ opURL, err := url.Parse(op.URL())
+ if err == nil {
+ url, err := opURL.Parse(location)
+ if err == nil {
+ event := httpsec.DownstreamRequestOverride{
+ DownstreamURL: url.String(),
+ AnalyzeBody: analyzeBody,
+ }
+ // Only HTTP 307 and 308 result in the body being re-submitted by the client.
+ if args.StatusCode == http.StatusTemporaryRedirect || args.StatusCode == http.StatusPermanentRedirect {
+ event.OriginalRequestBody = requestBody
+ }
+ dyngo.EmitData(op.HandlerOp, event)
+ }
+ }
+ }
+
+ if analyzeBody && !isRedirect && resubmitRequestBody && requestBody != nil {
+ builder = builder.WithDownwardRequestBody(requestBody)
+ }
+
+ if analyzeBody && !isRedirect && args.Body != nil && *args.Body != nil && *args.Body != http.NoBody {
encodable, err := body.NewEncodable(http.Header(args.Headers).Get("Content-Type"), args.Body, maxBodyParseSize)
if err != nil {
log.Debug("Unsupported response body content type or error reading body: %s", err.Error())
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/tags.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/tags.go
index 6ce403e4d..5a69c328f 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/tags.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/tags.go
@@ -31,7 +31,8 @@ const (
durationExtSuffix = ".duration_ext"
- blockedRequestTag = "appsec.blocked"
+ blockedRequestTag = "appsec.blocked"
+ downwardRequestTag = wafSpanTagPrefix + "downstream_request"
)
// AddRulesMonitoringTags adds the tags related to security rules monitoring
@@ -40,6 +41,14 @@ func AddRulesMonitoringTags(th trace.TagSetter) {
th.SetTag(ext.ManualKeep, samplernames.AppSec)
}
+func addDownwardRequestTag(th trace.TagSetter, value int) {
+ if value == 0 {
+ return
+ }
+
+ th.SetTag(downwardRequestTag, value)
+}
+
// AddWAFMonitoringTags adds the tags related to the monitoring of the WAF
func AddWAFMonitoringTags(th trace.TagSetter, metrics *emitter.ContextMetrics, rulesVersion string, truncations map[libddwaf.TruncationReason][]int, timerStats map[timer.Key]time.Duration) {
// Rules version is set for every request to help the backend associate Feature duration metrics with rule version
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/waf.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/waf.go
index 2931d671d..7565e1760 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/waf.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf/waf.go
@@ -143,6 +143,7 @@ func (waf *Feature) onFinish(op *waf.ContextOperation, _ waf.ContextRes) {
timerStats := ctx.Timer.Stats()
metrics := op.GetMetricsInstance()
AddWAFMonitoringTags(op, metrics, waf.rulesVersion, truncations, timerStats)
+ addDownwardRequestTag(op, int(metrics.SumDownstreamRequestsCalls.Load()))
metrics.Submit(truncations, timerStats)
if wafEvents := op.Events(); len(wafEvents) > 0 {
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/env.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/env.go
index ad6e485a4..fb46466bc 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/env.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants/env.go
@@ -55,4 +55,10 @@ const (
// CIVisibilityInternalParallelEarlyFlakeDetectionEnabled indicates if the internal parallel early flake detection feature is enabled.
CIVisibilityInternalParallelEarlyFlakeDetectionEnabled = "DD_CIVISIBILITY_INTERNAL_PARALLEL_EARLY_FLAKE_DETECTION_ENABLED"
+
+ // CIVisibilitySubtestFeaturesEnabled indicates if subtest-specific management and retry features are enabled.
+ CIVisibilitySubtestFeaturesEnabled = "DD_CIVISIBILITY_SUBTEST_FEATURES_ENABLED"
+
+ // CIVisibilityUseNoopTracer indicates if the ci visibility mode must set a noop tracer (avoid change current test behaviors over the noop tracer implementation)
+ CIVisibilityUseNoopTracer = "DD_CIVISIBILITY_USE_NOOP_TRACER"
)
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/environmentTags.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/environmentTags.go
index 2854b67b0..595ea997b 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/environmentTags.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/environmentTags.go
@@ -310,7 +310,7 @@ func createCITagsMap() map[string]string {
// If the head commit SHA is available, populate additional Git head metadata
if headCommitSha, ok := localTags[constants.GitHeadCommit]; ok {
if headCommitData, err := fetchCommitData(headCommitSha); err != nil {
- log.Warn("civisibility: failed to fetch head commit data: %s", err.Error())
+ log.Warn("civisibility: failed to fetch head commit data for %s: %s", headCommitSha, err.Error())
} else if headCommitSha == headCommitData.CommitSha {
localTags[constants.GitHeadAuthorDate] = headCommitData.AuthorDate.String()
localTags[constants.GitHeadAuthorName] = headCommitData.AuthorName
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/git.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/git.go
index 972aaccec..a102f603d 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/git.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/git.go
@@ -142,9 +142,9 @@ func execGit(commandType telemetry.CommandType, args ...string) (val []byte, err
defer func() {
durationInMs := time.Since(startTime).Milliseconds()
if err != nil {
- log.Debug("civisibility.git.command [%s][%s][%dms]: git %s", commandType, err.Error(), durationInMs, strings.Join(args, " "))
+ log.Debug("civisibility.git.command [%s][%s][%dms]: git %s\n%s", commandType, err.Error(), durationInMs, strings.Join(args, " "), string(val))
} else {
- log.Debug("civisibility.git.command [%s][%dms]: git %s", commandType, durationInMs, strings.Join(args, " "))
+ log.Debug("civisibility.git.command [%s][%dms]: git %s\n%s", commandType, durationInMs, strings.Join(args, " "), string(val))
}
}()
}
@@ -197,9 +197,9 @@ func execGitStringWithInput(commandType telemetry.CommandType, input string, arg
defer func() {
durationInMs := time.Since(startTime).Milliseconds()
if err != nil {
- log.Debug("civisibility.git.command [%s][%s][%dms]: git %s", commandType, err.Error(), durationInMs, strings.Join(args, " "))
+ log.Debug("civisibility.git.command(input) [%s][%s][%dms]: git %s\n%s", commandType, err.Error(), durationInMs, strings.Join(args, " "), val)
} else {
- log.Debug("civisibility.git.command [%s][%dms]: git %s", commandType, durationInMs, strings.Join(args, " "))
+ log.Debug("civisibility.git.command(input) [%s][%dms]: git %s\n%s", commandType, durationInMs, strings.Join(args, " "), val)
}
}()
}
@@ -261,6 +261,14 @@ func getLocalGitData() (localGitData, error) {
if out, err := execGitString(telemetry.GitAddPermissionCommandType, "config", "--global", "--add", "safe.directory", gitDir); err != nil {
log.Debug("civisibility.git: error while setting permissions to git folder: %s\n out: %s\n error: %s", gitDir, out, err.Error())
}
+ // if the git folder contains with a `/.git` then we also add permission to the parent.
+ if strings.HasSuffix(gitDir, "/.git") {
+ parentGitDir := strings.TrimSuffix(gitDir, "/.git")
+ log.Debug("civisibility.git: setting permissions to git folder: %s", parentGitDir)
+ if out, err := execGitString(telemetry.GitAddPermissionCommandType, "config", "--global", "--add", "safe.directory", parentGitDir); err != nil {
+ log.Debug("civisibility.git: error while setting permissions to git folder: %s\n out: %s\n error: %s", parentGitDir, out, err.Error())
+ }
+ }
} else {
log.Debug("civisibility.git: error getting the parent git folder.")
}
@@ -648,18 +656,38 @@ func CreatePackFiles(commitsToInclude []string, commitsToExclude []string) []str
objectsShasString += objectSha + "\n"
}
- // get a temporary path to store the pack files
- temporaryPath, err := os.MkdirTemp("", "pack-objects")
- if err != nil {
- log.Warn("civisibility: error creating temporary directory: %s", err.Error())
- return nil
+ workingDirectory := func() string {
+ wd, err := os.Getwd()
+ if err != nil {
+ return "."
+ }
+ return wd
+ }
+
+ var temporaryPath string
+ var out string
+ var err error
+
+ // Git can throw a cross device error if the temporal folder is in a different drive than the .git folder (eg. symbolic link)
+ // to handle this edge case, we first try with a temp folder and if we fail then we try in the working directory folder.
+ for _, folder := range []string{"", workingDirectory()} {
+ // get a temporary path to store the pack files
+ temporaryPath, err = os.MkdirTemp(folder, ".dd-pack-objects")
+ if err != nil {
+ log.Warn("civisibility: error creating temporary directory %s: %s", folder, err.Error())
+ continue
+ }
+
+ // git pack-objects --compression=9 --max-pack-size={MaxPackFileSizeInMb}m "{temporaryPath}"
+ out, err = execGitStringWithInput(telemetry.PackObjectsCommandsType, objectsShasString,
+ "pack-objects", "--compression=9", "--max-pack-size="+strconv.Itoa(MaxPackFileSizeInMb)+"m", temporaryPath+"/")
+ if err == nil {
+ break
+ }
}
- // git pack-objects --compression=9 --max-pack-size={MaxPackFileSizeInMb}m "{temporaryPath}"
- out, err := execGitStringWithInput(telemetry.PackObjectsCommandsType, objectsShasString,
- "pack-objects", "--compression=9", "--max-pack-size="+strconv.Itoa(MaxPackFileSizeInMb)+"m", temporaryPath+"/")
if err != nil {
- log.Warn("civisibility: error creating pack files: %s", err.Error())
+ log.Warn("civisibility: error creating pack files in %s: %s", temporaryPath, err.Error())
return nil
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/config.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/config.go
new file mode 100644
index 000000000..653e9e111
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/config.go
@@ -0,0 +1,126 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package config
+
+import (
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+)
+
+var (
+ useFreshConfig bool
+ instance *Config
+ // mu protects instance and useFreshConfig
+ mu sync.Mutex
+)
+
+// Config represents global configuration properties.
+// Config instances should be obtained via Get() which always returns a non-nil value.
+// Methods on Config assume a non-nil receiver and will panic if called on nil.
+type Config struct {
+ mu sync.RWMutex
+ // Config fields are protected by the mutex.
+ agentURL *url.URL
+ debug bool
+ logStartup bool
+ serviceName string
+ version string
+ env string
+ serviceMappings map[string]string
+ hostname string
+ runtimeMetrics bool
+ runtimeMetricsV2 bool
+ profilerHotspots bool
+ profilerEndpoints bool
+ spanAttributeSchemaVersion int
+ peerServiceDefaultsEnabled bool
+ peerServiceMappings map[string]string
+ debugAbandonedSpans bool
+ spanTimeout time.Duration
+ partialFlushMinSpans int
+ partialFlushEnabled bool
+ statsComputationEnabled bool
+ dataStreamsMonitoringEnabled bool
+ dynamicInstrumentationEnabled bool
+ globalSampleRate float64
+ ciVisibilityEnabled bool
+ ciVisibilityAgentless bool
+ logDirectory string
+ traceRateLimitPerSecond float64
+}
+
+// loadConfig initializes and returns a new config by reading from all configured sources.
+// This function is NOT thread-safe and should only be called once through Get's sync.Once.
+func loadConfig() *Config {
+ cfg := new(Config)
+
+ // TODO: Use defaults from config json instead of hardcoding them here
+ cfg.agentURL = provider.getURL("DD_TRACE_AGENT_URL", &url.URL{Scheme: "http", Host: "localhost:8126"})
+ cfg.debug = provider.getBool("DD_TRACE_DEBUG", false)
+ cfg.logStartup = provider.getBool("DD_TRACE_STARTUP_LOGS", false)
+ cfg.serviceName = provider.getString("DD_SERVICE", "")
+ cfg.version = provider.getString("DD_VERSION", "")
+ cfg.env = provider.getString("DD_ENV", "")
+ cfg.serviceMappings = provider.getMap("DD_SERVICE_MAPPING", nil)
+ cfg.hostname = provider.getString("DD_TRACE_SOURCE_HOSTNAME", "")
+ cfg.runtimeMetrics = provider.getBool("DD_RUNTIME_METRICS_ENABLED", false)
+ cfg.runtimeMetricsV2 = provider.getBool("DD_RUNTIME_METRICS_V2_ENABLED", false)
+ cfg.profilerHotspots = provider.getBool("DD_PROFILING_CODE_HOTSPOTS_COLLECTION_ENABLED", false)
+ cfg.profilerEndpoints = provider.getBool("DD_PROFILING_ENDPOINT_COLLECTION_ENABLED", false)
+ cfg.spanAttributeSchemaVersion = provider.getInt("DD_TRACE_SPAN_ATTRIBUTE_SCHEMA", 0)
+ cfg.peerServiceDefaultsEnabled = provider.getBool("DD_TRACE_PEER_SERVICE_DEFAULTS_ENABLED", false)
+ cfg.peerServiceMappings = provider.getMap("DD_TRACE_PEER_SERVICE_MAPPING", nil)
+ cfg.debugAbandonedSpans = provider.getBool("DD_TRACE_DEBUG_ABANDONED_SPANS", false)
+ cfg.spanTimeout = provider.getDuration("DD_TRACE_ABANDONED_SPAN_TIMEOUT", 0)
+ cfg.partialFlushMinSpans = provider.getInt("DD_TRACE_PARTIAL_FLUSH_MIN_SPANS", 0)
+ cfg.partialFlushEnabled = provider.getBool("DD_TRACE_PARTIAL_FLUSH_ENABLED", false)
+ cfg.statsComputationEnabled = provider.getBool("DD_TRACE_STATS_COMPUTATION_ENABLED", false)
+ cfg.dataStreamsMonitoringEnabled = provider.getBool("DD_DATA_STREAMS_ENABLED", false)
+ cfg.dynamicInstrumentationEnabled = provider.getBool("DD_DYNAMIC_INSTRUMENTATION_ENABLED", false)
+ cfg.globalSampleRate = provider.getFloat("DD_TRACE_SAMPLE_RATE", 0.0)
+ cfg.ciVisibilityEnabled = provider.getBool("DD_CIVISIBILITY_ENABLED", false)
+ cfg.ciVisibilityAgentless = provider.getBool("DD_CIVISIBILITY_AGENTLESS_ENABLED", false)
+ cfg.logDirectory = provider.getString("DD_TRACE_LOG_DIRECTORY", "")
+ cfg.traceRateLimitPerSecond = provider.getFloat("DD_TRACE_RATE_LIMIT", 0.0)
+
+ return cfg
+}
+
+// Get returns the global configuration singleton.
+// This function is thread-safe and can be called from multiple goroutines concurrently.
+// The configuration is lazily initialized on first access using sync.Once, ensuring
+// loadConfig() is called exactly once even under concurrent access.
+func Get() *Config {
+ mu.Lock()
+ defer mu.Unlock()
+ if useFreshConfig || instance == nil {
+ instance = loadConfig()
+ }
+
+ return instance
+}
+
+func SetUseFreshConfig(use bool) {
+ mu.Lock()
+ defer mu.Unlock()
+ useFreshConfig = use
+}
+
+func (c *Config) Debug() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.debug
+}
+
+func (c *Config) SetDebug(enabled bool, origin telemetry.Origin) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.debug = enabled
+ telemetry.RegisterAppConfig("DD_TRACE_DEBUG", enabled, origin)
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/configprovider.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/configprovider.go
new file mode 100644
index 000000000..01f043e3f
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/configprovider.go
@@ -0,0 +1,156 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package config
+
+import (
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+)
+
+var provider = defaultconfigProvider()
+
+type configProvider struct {
+ sources []configSource // In order of priority
+}
+
+type configSource interface {
+ get(key string) string
+ origin() telemetry.Origin
+}
+
+type idAwareConfigSource interface {
+ configSource
+ getID() string
+}
+
+func defaultconfigProvider() *configProvider {
+ return &configProvider{
+ sources: []configSource{
+ newDeclarativeConfigSource(managedFilePath, telemetry.OriginManagedStableConfig),
+ new(envConfigSource),
+ new(otelEnvConfigSource),
+ newDeclarativeConfigSource(localFilePath, telemetry.OriginLocalStableConfig),
+ },
+ }
+}
+
+// get is a generic helper that iterates through config sources and parses values.
+// The parse function should return the parsed value and true if parsing succeeded, or false otherwise.
+func get[T any](p *configProvider, key string, def T, parse func(string) (T, bool)) T {
+ for _, source := range p.sources {
+ if v := source.get(key); v != "" {
+ var id string
+ if s, ok := source.(idAwareConfigSource); ok {
+ id = s.getID()
+ }
+ if parsed, ok := parse(v); ok {
+ telemetry.RegisterAppConfigs(telemetry.Configuration{Name: key, Value: v, Origin: source.origin(), ID: id})
+ return parsed
+ }
+ }
+ }
+ telemetry.RegisterAppConfigs(telemetry.Configuration{Name: key, Value: def, Origin: telemetry.OriginDefault, ID: telemetry.EmptyID})
+ return def
+}
+
+func (p *configProvider) getString(key string, def string) string {
+ return get(p, key, def, func(v string) (string, bool) {
+ return v, true
+ })
+}
+
+func (p *configProvider) getBool(key string, def bool) bool {
+ return get(p, key, def, func(v string) (bool, bool) {
+ if v == "true" {
+ return true, true
+ } else if v == "false" {
+ return false, true
+ }
+ return false, false
+ })
+}
+
+func (p *configProvider) getInt(key string, def int) int {
+ return get(p, key, def, func(v string) (int, bool) {
+ intVal, err := strconv.Atoi(v)
+ return intVal, err == nil
+ })
+}
+
+func (p *configProvider) getMap(key string, def map[string]string) map[string]string {
+ return get(p, key, def, func(v string) (map[string]string, bool) {
+ m := parseMapString(v)
+ return m, len(m) > 0
+ })
+}
+
+func (p *configProvider) getDuration(key string, def time.Duration) time.Duration {
+ return get(p, key, def, func(v string) (time.Duration, bool) {
+ d, err := time.ParseDuration(v)
+ return d, err == nil
+ })
+}
+
+func (p *configProvider) getFloat(key string, def float64) float64 {
+ return get(p, key, def, func(v string) (float64, bool) {
+ floatVal, err := strconv.ParseFloat(v, 64)
+ return floatVal, err == nil
+ })
+}
+
+func (p *configProvider) getURL(key string, def *url.URL) *url.URL {
+ return get(p, key, def, func(v string) (*url.URL, bool) {
+ u, err := url.Parse(v)
+ return u, err == nil
+ })
+}
+
+// normalizeKey is a helper function for configSource implementations to normalize the key to a valid environment variable name.
+func normalizeKey(key string) string {
+ if strings.HasPrefix(key, "DD_") || strings.HasPrefix(key, "OTEL_") {
+ return key
+ }
+ return "DD_" + strings.ToUpper(key)
+}
+
+// parseMapString parses a string containing key:value pairs separated by comma or space.
+// Format: "key1:value1,key2:value2" or "key1:value1 key2:value2"
+func parseMapString(str string) map[string]string {
+ result := make(map[string]string)
+
+ // Determine separator (comma or space)
+ sep := " "
+ if strings.Contains(str, ",") {
+ sep = ","
+ }
+
+ // Parse each key:value pair
+ for _, pair := range strings.Split(str, sep) {
+ pair = strings.TrimSpace(pair)
+ if pair == "" {
+ continue
+ }
+
+ // Split on colon delimiter
+ kv := strings.SplitN(pair, ":", 2)
+ key := strings.TrimSpace(kv[0])
+ if key == "" {
+ continue
+ }
+
+ var val string
+ if len(kv) == 2 {
+ val = strings.TrimSpace(kv[1])
+ }
+ result[key] = val
+ }
+
+ return result
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/declarativeconfig.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/declarativeconfig.go
new file mode 100644
index 000000000..b25cce3ba
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/declarativeconfig.go
@@ -0,0 +1,30 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package config
+
+import "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+
+// declarativeConfig represents a configuration loaded from a YAML source file.
+type declarativeConfig struct {
+ Config map[string]string `yaml:"apm_configuration_default,omitempty"` // Configuration key-value pairs.
+ ID string `yaml:"config_id,omitempty"` // Identifier for the config set.
+}
+
+func (d *declarativeConfig) get(key string) string {
+ return d.Config[key]
+}
+
+func (d *declarativeConfig) getID() string {
+ return d.ID
+}
+
+// emptyDeclarativeConfig creates and returns a new, empty declarativeConfig instance.
+func emptyDeclarativeConfig() *declarativeConfig {
+ return &declarativeConfig{
+ Config: make(map[string]string),
+ ID: telemetry.EmptyID,
+ }
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/declarativeconfigsource.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/declarativeconfigsource.go
new file mode 100644
index 000000000..4b8663d07
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/declarativeconfigsource.go
@@ -0,0 +1,97 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package config
+
+import (
+ "os"
+
+ "go.yaml.in/yaml/v3"
+
+ "github.com/DataDog/dd-trace-go/v2/internal/log"
+ "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+)
+
+const (
+ // File paths are supported on linux only
+ localFilePath = "/etc/datadog-agent/application_monitoring.yaml"
+ managedFilePath = "/etc/datadog-agent/managed/datadog-agent/stable/application_monitoring.yaml"
+
+ // maxFileSize defines the maximum size in bytes for declarative config files (4KB). This limit ensures predictable memory use and guards against malformed large files.
+ maxFileSize = 4 * 1024
+)
+
+// declarativeConfigSource represents a source of declarative configuration loaded from a file.
+type declarativeConfigSource struct {
+ filePath string // Path to the configuration file.
+ originValue telemetry.Origin // Origin identifier for telemetry.
+ config *declarativeConfig // Parsed declarative configuration.
+}
+
+func (d *declarativeConfigSource) get(key string) string {
+ return d.config.get(normalizeKey(key))
+}
+
+func (d *declarativeConfigSource) getID() string {
+ return d.config.getID()
+}
+
+func (d *declarativeConfigSource) origin() telemetry.Origin {
+ return d.originValue
+}
+
+// newDeclarativeConfigSource initializes a new declarativeConfigSource from the given file.
+func newDeclarativeConfigSource(filePath string, origin telemetry.Origin) *declarativeConfigSource {
+ return &declarativeConfigSource{
+ filePath: filePath,
+ originValue: origin,
+ config: parseFile(filePath),
+ }
+}
+
+// ParseFile reads and parses the config file at the given path.
+// Returns an empty config if the file doesn't exist or is invalid.
+func parseFile(filePath string) *declarativeConfig {
+ info, err := os.Stat(filePath)
+ if err != nil {
+ // It's expected that the declarative config file may not exist; its absence is not an error.
+ if !os.IsNotExist(err) {
+ log.Warn("Failed to stat declarative config file %q, dropping: %v", filePath, err.Error())
+ }
+ return emptyDeclarativeConfig()
+ }
+
+ if info.Size() > maxFileSize {
+ log.Warn("Declarative config file %s exceeds size limit (%d bytes > %d bytes), dropping",
+ filePath, info.Size(), maxFileSize)
+ return emptyDeclarativeConfig()
+ }
+
+ data, err := os.ReadFile(filePath)
+ if err != nil {
+ // It's expected that the declarative config file may not exist; its absence is not an error.
+ if !os.IsNotExist(err) {
+ log.Warn("Failed to read declarative config file %q, dropping: %v", filePath, err.Error())
+ }
+ return emptyDeclarativeConfig()
+ }
+
+ return fileContentsToConfig(data, filePath)
+}
+
+// fileContentsToConfig parses YAML data into a declarativeConfig struct.
+// Returns an empty config if parsing fails or the data is malformed.
+func fileContentsToConfig(data []byte, fileName string) *declarativeConfig {
+ dc := &declarativeConfig{}
+ err := yaml.Unmarshal(data, dc)
+ if err != nil {
+ log.Warn("Parsing declarative config file %s failed due to error, dropping: %v", fileName, err.Error())
+ return emptyDeclarativeConfig()
+ }
+ if dc.Config == nil {
+ dc.Config = make(map[string]string)
+ }
+ return dc
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/envconfigsource.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/envconfigsource.go
new file mode 100644
index 000000000..d5f28ef9b
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/envconfigsource.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package config
+
+import (
+ "github.com/DataDog/dd-trace-go/v2/internal/env"
+ "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+)
+
+type envConfigSource struct{}
+
+func (e *envConfigSource) get(key string) string {
+ return env.Get(normalizeKey(key))
+}
+
+func (e *envConfigSource) origin() telemetry.Origin {
+ return telemetry.OriginEnvVar
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/otelenvconfigsource.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/otelenvconfigsource.go
new file mode 100644
index 000000000..bcbd26e29
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/config/otelenvconfigsource.go
@@ -0,0 +1,201 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+package config
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/DataDog/dd-trace-go/v2/internal"
+ "github.com/DataDog/dd-trace-go/v2/internal/env"
+ "github.com/DataDog/dd-trace-go/v2/internal/log"
+ "github.com/DataDog/dd-trace-go/v2/internal/telemetry"
+)
+
+const (
+ ddPrefix = "config_datadog:"
+ otelPrefix = "config_opentelemetry:"
+)
+
+type otelEnvConfigSource struct{}
+
+func (o *otelEnvConfigSource) get(key string) string {
+ ddKey := normalizeKey(key)
+ entry := otelConfigs[ddKey]
+ if entry == nil {
+ return ""
+ }
+ otVal := env.Get(entry.ot)
+ if otVal == "" {
+ return ""
+ }
+ if ddVal := env.Get(ddKey); ddVal != "" {
+ log.Warn("Both %q and %q are set, using %s=%s", entry.ot, ddKey, entry.ot, ddVal)
+ telemetryTags := []string{ddPrefix + strings.ToLower(ddKey), otelPrefix + strings.ToLower(entry.ot)}
+ telemetry.Count(telemetry.NamespaceTracers, "otel.env.hiding", telemetryTags).Submit(1)
+ }
+ val, err := entry.remapper(otVal)
+ if err != nil {
+ log.Warn("%s", err.Error())
+ telemetryTags := []string{ddPrefix + strings.ToLower(ddKey), otelPrefix + strings.ToLower(entry.ot)}
+ telemetry.Count(telemetry.NamespaceTracers, "otel.env.invalid", telemetryTags).Submit(1)
+ return ""
+ }
+ return val
+}
+
+func (o *otelEnvConfigSource) origin() telemetry.Origin {
+ return telemetry.OriginEnvVar
+}
+
+type otelDDEnv struct {
+ ot string
+ remapper func(string) (string, error)
+}
+
+var otelConfigs = map[string]*otelDDEnv{
+ "DD_SERVICE": {
+ ot: "OTEL_SERVICE_NAME",
+ remapper: mapService,
+ },
+ "DD_RUNTIME_METRICS_ENABLED": {
+ ot: "OTEL_METRICS_EXPORTER",
+ remapper: mapMetrics,
+ },
+ "DD_TRACE_DEBUG": {
+ ot: "OTEL_LOG_LEVEL",
+ remapper: mapLogLevel,
+ },
+ "DD_TRACE_ENABLED": {
+ ot: "OTEL_TRACES_EXPORTER",
+ remapper: mapEnabled,
+ },
+ "DD_TRACE_SAMPLE_RATE": {
+ ot: "OTEL_TRACES_SAMPLER",
+ remapper: mapSampleRate,
+ },
+ "DD_TRACE_PROPAGATION_STYLE": {
+ ot: "OTEL_PROPAGATORS",
+ remapper: mapPropagationStyle,
+ },
+ "DD_TAGS": {
+ ot: "OTEL_RESOURCE_ATTRIBUTES",
+ remapper: mapDDTags,
+ },
+}
+
+var ddTagsMapping = map[string]string{
+ "service.name": "service",
+ "deployment.environment": "env",
+ "service.version": "version",
+}
+
+var unsupportedSamplerMapping = map[string]string{
+ "always_on": "parentbased_always_on",
+ "always_off": "parentbased_always_off",
+ "traceidratio": "parentbased_traceidratio",
+}
+
+var propagationMapping = map[string]string{
+ "tracecontext": "tracecontext",
+ "b3": "b3 single header",
+ "b3multi": "b3multi",
+ "datadog": "datadog",
+ "none": "none",
+}
+
+// mapService maps OTEL_SERVICE_NAME to DD_SERVICE
+func mapService(ot string) (string, error) {
+ return ot, nil
+}
+
+// mapMetrics maps OTEL_METRICS_EXPORTER to DD_RUNTIME_METRICS_ENABLED
+func mapMetrics(ot string) (string, error) {
+ ot = strings.TrimSpace(strings.ToLower(ot))
+ if ot == "none" {
+ return "false", nil
+ }
+ return "", fmt.Errorf("the following configuration is not supported: OTEL_METRICS_EXPORTER=%v", ot)
+}
+
+// mapLogLevel maps OTEL_LOG_LEVEL to DD_TRACE_DEBUG
+func mapLogLevel(ot string) (string, error) {
+ if strings.TrimSpace(strings.ToLower(ot)) == "debug" {
+ return "true", nil
+ }
+ return "", fmt.Errorf("the following configuration is not supported: OTEL_LOG_LEVEL=%v", ot)
+}
+
+// mapEnabled maps OTEL_TRACES_EXPORTER to DD_TRACE_ENABLED
+func mapEnabled(ot string) (string, error) {
+ if strings.TrimSpace(strings.ToLower(ot)) == "none" {
+ return "false", nil
+ }
+ return "", fmt.Errorf("the following configuration is not supported: OTEL_TRACES_EXPORTER=%v", ot)
+}
+
+// mapSampleRate maps OTEL_TRACES_SAMPLER to DD_TRACE_SAMPLE_RATE
+func otelTraceIDRatio() string {
+ if v := env.Get("OTEL_TRACES_SAMPLER_ARG"); v != "" {
+ return v
+ }
+ return "1.0"
+}
+
+// mapSampleRate maps OTEL_TRACES_SAMPLER to DD_TRACE_SAMPLE_RATE
+func mapSampleRate(ot string) (string, error) {
+ ot = strings.TrimSpace(strings.ToLower(ot))
+ if v, ok := unsupportedSamplerMapping[ot]; ok {
+ log.Warn("The following configuration is not supported: OTEL_TRACES_SAMPLER=%s. %s will be used", ot, v)
+ ot = v
+ }
+
+ var samplerMapping = map[string]string{
+ "parentbased_always_on": "1.0",
+ "parentbased_always_off": "0.0",
+ "parentbased_traceidratio": otelTraceIDRatio(),
+ }
+ if v, ok := samplerMapping[ot]; ok {
+ return v, nil
+ }
+ return "", fmt.Errorf("unknown sampling configuration %v", ot)
+}
+
+// mapPropagationStyle maps OTEL_PROPAGATORS to DD_TRACE_PROPAGATION_STYLE
+func mapPropagationStyle(ot string) (string, error) {
+ ot = strings.TrimSpace(strings.ToLower(ot))
+ supportedStyles := make([]string, 0)
+ for _, otStyle := range strings.Split(ot, ",") {
+ otStyle = strings.TrimSpace(otStyle)
+ if _, ok := propagationMapping[otStyle]; ok {
+ supportedStyles = append(supportedStyles, propagationMapping[otStyle])
+ } else {
+ log.Warn("Invalid configuration: %q is not supported. This propagation style will be ignored.", otStyle)
+ }
+ }
+ return strings.Join(supportedStyles, ","), nil
+}
+
+// mapDDTags maps OTEL_RESOURCE_ATTRIBUTES to DD_TAGS
+func mapDDTags(ot string) (string, error) {
+ ddTags := make([]string, 0)
+ internal.ForEachStringTag(ot, internal.OtelTagsDelimeter, func(key, val string) {
+ // replace otel delimiter with dd delimiter and normalize tag names
+ if ddkey, ok := ddTagsMapping[key]; ok {
+ // map reserved otel tag names to dd tag names
+ ddTags = append([]string{ddkey + internal.DDTagsDelimiter + val}, ddTags...)
+ } else {
+ ddTags = append(ddTags, key+internal.DDTagsDelimiter+val)
+ }
+ })
+
+ if len(ddTags) > 10 {
+ log.Warn("The following resource attributes have been dropped: %v. Only the first 10 resource attributes will be applied: %s", ddTags[10:], ddTags[:10]) //nolint:gocritic // Slice logging for debugging
+ ddTags = ddTags[:10]
+ }
+
+ return strings.Join(ddTags, ","), nil
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.gen.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.gen.go
index dc7edf5f8..5ee5c8505 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.gen.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.gen.go
@@ -42,7 +42,9 @@ var SupportedConfigurations = map[string]struct{}{
"DD_CIVISIBILITY_IMPACTED_TESTS_DETECTION_ENABLED": {},
"DD_CIVISIBILITY_INTERNAL_PARALLEL_EARLY_FLAKE_DETECTION_ENABLED": {},
"DD_CIVISIBILITY_LOGS_ENABLED": {},
+ "DD_CIVISIBILITY_SUBTEST_FEATURES_ENABLED": {},
"DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT": {},
+ "DD_CIVISIBILITY_USE_NOOP_TRACER": {},
"DD_CUSTOM_TRACE_ID": {},
"DD_DATA_STREAMS_ENABLED": {},
"DD_DBM_PROPAGATION_MODE": {},
@@ -80,6 +82,7 @@ var SupportedConfigurations = map[string]struct{}{
"DD_LLMOBS_ML_APP": {},
"DD_LLMOBS_PROJECT_NAME": {},
"DD_LOGGING_RATE": {},
+ "DD_METRICS_OTEL_ENABLED": {},
"DD_PIPELINE_EXECUTION_ID": {},
"DD_PROFILING_AGENTLESS": {},
"DD_PROFILING_CODE_HOTSPOTS_COLLECTION_ENABLED": {},
@@ -132,7 +135,6 @@ var SupportedConfigurations = map[string]struct{}{
"DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED": {},
"DD_TRACE_ABANDONED_SPAN_TIMEOUT": {},
"DD_TRACE_AGENT_PORT": {},
- "DD_TRACE_V1_PAYLOAD_FORMAT_ENABLED": {},
"DD_TRACE_AGENT_URL": {},
"DD_TRACE_ANALYTICS_ENABLED": {},
"DD_TRACE_AWS_ANALYTICS_ENABLED": {},
@@ -179,6 +181,7 @@ var SupportedConfigurations = map[string]struct{}{
"DD_TRACE_LEVELDB_ANALYTICS_ENABLED": {},
"DD_TRACE_LOGRUS_ANALYTICS_ENABLED": {},
"DD_TRACE_LOG_DIRECTORY": {},
+ "DD_TRACE_MCP_ANALYTICS_ENABLED": {},
"DD_TRACE_MEMCACHE_ANALYTICS_ENABLED": {},
"DD_TRACE_MGO_ANALYTICS_ENABLED": {},
"DD_TRACE_MONGO_ANALYTICS_ENABLED": {},
@@ -213,15 +216,25 @@ var SupportedConfigurations = map[string]struct{}{
"DD_TRACE_STARTUP_LOGS": {},
"DD_TRACE_STATS_COMPUTATION_ENABLED": {},
"DD_TRACE_TWIRP_ANALYTICS_ENABLED": {},
+ "DD_TRACE_V1_PAYLOAD_FORMAT_ENABLED": {},
"DD_TRACE_VALKEY_ANALYTICS_ENABLED": {},
"DD_TRACE_VALKEY_RAW_COMMAND": {},
"DD_TRACE_VAULT_ANALYTICS_ENABLED": {},
"DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH": {},
"DD_TRACE__ANALYTICS_ENABLED": {},
"DD_VERSION": {},
+ "OTEL_EXPORTER_OTLP_ENDPOINT": {},
+ "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": {},
+ "OTEL_EXPORTER_OTLP_METRICS_HEADERS": {},
+ "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL": {},
+ "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": {},
+ "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE": {},
+ "OTEL_EXPORTER_OTLP_PROTOCOL": {},
"OTEL_LOGS_EXPORTER": {},
"OTEL_LOG_LEVEL": {},
"OTEL_METRICS_EXPORTER": {},
+ "OTEL_METRIC_EXPORT_INTERVAL": {},
+ "OTEL_METRIC_EXPORT_TIMEOUT": {},
"OTEL_PROPAGATORS": {},
"OTEL_RESOURCE_ATTRIBUTES": {},
"OTEL_SERVICE_NAME": {},
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.json b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.json
index 857dd19f9..46b0dad5e 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.json
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/env/supported_configurations.json
@@ -99,6 +99,12 @@
"DD_CIVISIBILITY_LOGS_ENABLED": [
"A"
],
+ "DD_CIVISIBILITY_SUBTEST_FEATURES_ENABLED": [
+ "A"
+ ],
+ "DD_CIVISIBILITY_USE_NOOP_TRACER": [
+ "A"
+ ],
"DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT": [
"A"
],
@@ -507,6 +513,9 @@
"DD_TRACE_LOG_DIRECTORY": [
"A"
],
+ "DD_TRACE_MCP_ANALYTICS_ENABLED": [
+ "A"
+ ],
"DD_TRACE_MEMCACHE_ANALYTICS_ENABLED": [
"A"
],
@@ -621,9 +630,6 @@
"DD_TRACE_VAULT_ANALYTICS_ENABLED": [
"A"
],
- "DD_TRACE_V1_PAYLOAD_FORMAT_ENABLED": [
- "A"
- ],
"DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH": [
"A"
],
@@ -633,6 +639,30 @@
"DD_VERSION": [
"A"
],
+ "DD_METRICS_OTEL_ENABLED": [
+ "A"
+ ],
+ "OTEL_EXPORTER_OTLP_ENDPOINT": [
+ "A"
+ ],
+ "OTEL_EXPORTER_OTLP_METRICS_HEADERS": [
+ "A"
+ ],
+ "OTEL_EXPORTER_OTLP_PROTOCOL": [
+ "A"
+ ],
+ "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": [
+ "A"
+ ],
+ "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": [
+ "A"
+ ],
+ "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL": [
+ "A"
+ ],
+ "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE": [
+ "A"
+ ],
"OTEL_LOGS_EXPORTER": [
"A"
],
@@ -642,6 +672,12 @@
"OTEL_METRICS_EXPORTER": [
"A"
],
+ "OTEL_METRIC_EXPORT_INTERVAL": [
+ "A"
+ ],
+ "OTEL_METRIC_EXPORT_TIMEOUT": [
+ "A"
+ ],
"OTEL_PROPAGATORS": [
"A"
],
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/llmobs.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/llmobs.go
index a08de7e97..ef230c42f 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/llmobs.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/llmobs.go
@@ -113,6 +113,9 @@ type llmobsContext struct {
outputMessages []LLMMessage
outputText string
+ // tool specific
+ intent string
+
// experiment specific
experimentInput any
experimentExpectedOutput any
@@ -513,6 +516,14 @@ func (l *LLMObs) llmobsSpanEvent(span *Span) *transport.LLMObsSpanEvent {
meta["tool_definitions"] = toolDefinitions
}
+ if intent := span.llmCtx.intent; intent != "" {
+ if spanKind != SpanKindTool {
+ log.Warn("llmobs: dropping intent on non-tool span kind, annotating intent is only supported for tool span kinds")
+ } else {
+ meta["intent"] = intent
+ }
+ }
+
spanStatus := "ok"
var errMsg *transport.ErrorMessage
if span.error != nil {
@@ -534,6 +545,8 @@ func (l *LLMObs) llmobsSpanEvent(span *Span) *transport.LLMObsSpanEvent {
parentID := defaultParentID
if span.parent != nil {
parentID = span.parent.apm.SpanID()
+ } else if span.propagated != nil {
+ parentID = span.propagated.SpanID
}
if span.llmTraceID == "" {
log.Warn("llmobs: span has no trace ID")
@@ -578,6 +591,15 @@ func (l *LLMObs) llmobsSpanEvent(span *Span) *transport.LLMObsSpanEvent {
tagsSlice = append(tagsSlice, fmt.Sprintf("%s:%s", k, v))
}
+ ddAttrs := transport.DDAttributes{
+ SpanID: spanID,
+ TraceID: span.llmTraceID,
+ APMTraceID: span.apm.TraceID(),
+ }
+ if span.scope != "" {
+ ddAttrs.Scope = span.scope
+ }
+
ev := &transport.LLMObsSpanEvent{
SpanID: spanID,
TraceID: span.llmTraceID,
@@ -593,7 +615,7 @@ func (l *LLMObs) llmobsSpanEvent(span *Span) *transport.LLMObsSpanEvent {
Metrics: span.llmCtx.metrics,
CollectionErrors: nil,
SpanLinks: span.spanLinks,
- Scope: span.scope,
+ DDAttributes: ddAttrs,
}
if b, err := json.Marshal(ev); err == nil {
rawSize := len(b)
@@ -688,6 +710,7 @@ func (l *LLMObs) StartSpan(ctx context.Context, kind SpanKind, name string, cfg
span.mlApp = cfg.MLApp
span.spanKind = kind
span.sessionID = cfg.SessionID
+ span.integration = cfg.Integration
span.llmCtx = llmobsContext{
modelName: cfg.ModelName,
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/span.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/span.go
index c0d96fa67..3919f440b 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/span.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/span.go
@@ -30,6 +30,8 @@ type StartSpanConfig struct {
MLApp string
// StartTime sets a custom start time for the span. If zero, uses current time.
StartTime time.Time
+ // Name of the tracing integration.
+ Integration string
}
// FinishSpanConfig contains configuration options for finishing an LLMObs span.
@@ -189,6 +191,9 @@ type SpanAnnotations struct {
// ToolDefinitions are the tool definitions for LLM spans.
ToolDefinitions []ToolDefinition
+ // Intent is a description of a reason for calling an MCP tool on tool spans
+ Intent string
+
// AgentManifest is the agent manifest for agent spans.
AgentManifest string
@@ -366,6 +371,14 @@ func (s *Span) Annotate(a SpanAnnotations) {
}
}
+ if a.Intent != "" {
+ if s.spanKind != SpanKindTool {
+ log.Warn("llmobs: intent can only be annotated on tool spans, ignoring")
+ } else {
+ s.llmCtx.intent = a.Intent
+ }
+ }
+
s.annotateIO(a)
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/span.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/span.go
index 50521db0f..236bcb12f 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/span.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/llmobs/transport/span.go
@@ -22,6 +22,13 @@ type SpanLink struct {
Flags uint32 `json:"flags,omitempty"`
}
+type DDAttributes struct {
+ SpanID string `json:"span_id"`
+ TraceID string `json:"trace_id"`
+ APMTraceID string `json:"apm_trace_id"`
+ Scope string `json:"scope,omitempty"`
+}
+
type LLMObsSpanEvent struct {
SpanID string `json:"span_id,omitempty"`
TraceID string `json:"trace_id,omitempty"`
@@ -37,7 +44,7 @@ type LLMObsSpanEvent struct {
Metrics map[string]float64 `json:"metrics,omitempty"`
CollectionErrors []string `json:"collection_errors,omitempty"`
SpanLinks []SpanLink `json:"span_links,omitempty"`
- Scope string `json:"-"`
+ DDAttributes DDAttributes `json:"_dd"`
}
type PushSpanEventsRequest struct {
@@ -65,8 +72,8 @@ func (c *Transport) PushSpanEvents(
EventType: "span",
Spans: []*LLMObsSpanEvent{ev},
}
- if ev.Scope != "" {
- req.Scope = ev.Scope
+ if ev.DDAttributes.Scope != "" {
+ req.Scope = ev.DDAttributes.Scope
}
body = append(body, req)
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/otelcontextmapping.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/otelcontextmapping.go
new file mode 100644
index 000000000..6fc8e5bf2
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/otelcontextmapping.go
@@ -0,0 +1,12 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+//go:build !linux
+
+package internal
+
+func CreateOtelProcessContextMapping(data []byte) error {
+ return nil
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/otelcontextmapping_linux.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/otelcontextmapping_linux.go
new file mode 100644
index 000000000..e5fa4cd6c
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/otelcontextmapping_linux.go
@@ -0,0 +1,130 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2025 Datadog, Inc.
+
+//go:build linux
+
+// This is a go port of https://github.com/DataDog/fullhost-code-hotspots-wip/blob/main/lang-exp/anonmapping-clib/otel_process_ctx.c
+
+package internal
+
+import (
+ "fmt"
+ "os"
+ "structs"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // These two constants are not in x/sys/unix by default; copy them from <linux/prctl.h>.
+ //nolint:revive
+ PR_SET_VMA = 0x53564D41
+ //nolint:revive
+ PR_SET_VMA_ANON_NAME = 0
+
+ otelContextSignature = "OTEL_CTX"
+)
+
+var (
+ otelContextMappingSize = 2 * os.Getpagesize()
+
+ existingMappingBytes []byte
+ publisherPID int
+)
+
+type processContextHeader struct {
+ _ structs.HostLayout
+ Signature [8]byte
+ Version uint32
+ PayloadSize uint32
+ PayloadAddr uintptr
+}
+
+func CreateOtelProcessContextMapping(data []byte) error {
+ // Clear the previous mapping if it exists
+ err := removeOtelProcessContextMapping()
+ if err != nil {
+ return fmt.Errorf("failed to remove previous mapping: %w", err)
+ }
+
+ headerSize := int(unsafe.Sizeof(processContextHeader{}))
+ if len(data)+headerSize > otelContextMappingSize {
+ return fmt.Errorf("data size is too large for the mapping size")
+ }
+
+ mappingBytes, err := unix.Mmap(
+ -1, // fd = -1 for an anonymous mapping
+ 0, // offset
+ otelContextMappingSize, // length
+ unix.PROT_READ|unix.PROT_WRITE,
+ unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
+ )
+ if err != nil {
+ return fmt.Errorf("failed to mmap: %w", err)
+ }
+
+ err = unix.Madvise(mappingBytes, unix.MADV_DONTFORK)
+ if err != nil {
+ _ = unix.Munmap(mappingBytes)
+ return fmt.Errorf("failed to madvise: %w", err)
+ }
+
+ addr := uintptr(unsafe.Pointer(&mappingBytes[0]))
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ header := processContextHeader{
+ Version: 1,
+ PayloadSize: uint32(len(data)),
+ PayloadAddr: addr + uintptr(headerSize),
+ }
+ copy(mappingBytes[headerSize:], data)
+ copy(mappingBytes[:headerSize], unsafe.Slice((*byte)(unsafe.Pointer(&header)), headerSize))
+ }()
+ wg.Wait()
+ // write the signature last to ensure that once a process validates the signature, it can safely read the whole data
+ copy(mappingBytes, otelContextSignature)
+
+ err = unix.Mprotect(mappingBytes, unix.PROT_READ)
+ if err != nil {
+ _ = unix.Munmap(mappingBytes)
+ return fmt.Errorf("failed to mprotect: %w", err)
+ }
+
+ // prctl expects a null-terminated string
+ contextNameNullTerminated, _ := unix.ByteSliceFromString(otelContextSignature)
+ // Failure to set the vma anon name is not a critical error (only supported on Linux 5.17+), so we ignore the return value.
+ _ = unix.Prctl(
+ PR_SET_VMA,
+ uintptr(PR_SET_VMA_ANON_NAME),
+ addr,
+ uintptr(otelContextMappingSize),
+ uintptr(unsafe.Pointer(&contextNameNullTerminated[0])),
+ )
+
+ existingMappingBytes = mappingBytes
+ publisherPID = os.Getpid()
+ return nil
+}
+
+func removeOtelProcessContextMapping() error {
+ // Check the publisher PID to verify that the process has not forked.
+ // It should not be necessary for Go, but just in case.
+ if existingMappingBytes == nil || publisherPID != os.Getpid() {
+ return nil
+ }
+
+ err := unix.Munmap(existingMappingBytes)
+ if err != nil {
+ return fmt.Errorf("failed to munmap: %w", err)
+ }
+ existingMappingBytes = nil
+ publisherPID = 0
+ return nil
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/remoteconfig.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/remoteconfig.go
index 464ed56a0..f6e8da574 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/remoteconfig.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/remoteconfig/remoteconfig.go
@@ -161,13 +161,12 @@ type Client struct {
lastError error
}
-// client is a RC client singleton that can be accessed by multiple products (tracing, ASM, profiling etc.).
-// Using a single RC client instance in the tracer is a requirement for remote configuration.
-var client *Client
-
var (
- startOnce sync.Once
- stopOnce sync.Once
+ // client is a RC client singleton that can be accessed by multiple products (tracing, ASM, profiling etc.).
+ // Using a single RC client instance in the tracer is a requirement for remote configuration.
+ client *Client
+ clientMux sync.Mutex
+ started bool
)
// newClient creates a new remoteconfig Client
@@ -197,34 +196,48 @@ func newClient(config ClientConfig) (*Client, error) {
// Start starts the client's update poll loop in a fresh goroutine.
// Noop if the client has already started.
func Start(config ClientConfig) error {
+ if !internal.BoolEnv("DD_REMOTE_CONFIGURATION_ENABLED", true) {
+ // Don't start polling if the feature is disabled explicitly
+ return nil
+ }
+ clientMux.Lock()
+ defer clientMux.Unlock()
+
+ if started {
+ // Return early if already started.
+ return nil
+ }
var err error
- startOnce.Do(func() {
- if !internal.BoolEnv("DD_REMOTE_CONFIGURATION_ENABLED", true) {
- // Don't start polling if the feature is disabled explicitly
- return
- }
- client, err = newClient(config)
- if err != nil {
- return
- }
- go func() {
- ticker := time.NewTicker(client.PollInterval)
- defer ticker.Stop()
-
- for {
- select {
- case <-client.stop:
- close(client.stop)
+ client, err = newClient(config)
+ if err != nil {
+ return err
+ }
+ started = true
+
+ var (
+ pollInterval = client.PollInterval
+ stop = client.stop
+ )
+ go func() {
+ ticker := time.NewTicker(pollInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-stop:
+ close(stop)
+ return
+ case <-ticker.C:
+ if client == nil {
return
- case <-ticker.C:
- client.Lock()
- client.updateState()
- client.Unlock()
}
+ client.Lock()
+ client.updateState()
+ client.Unlock()
}
- }()
- })
- return err
+ }
+ }()
+ return nil
}
// Stop stops the client's update poll loop.
@@ -232,28 +245,37 @@ func Start(config ClientConfig) error {
// The remote config client is supposed to have the same lifecycle as the tracer.
// It can't be restarted after a call to Stop() unless explicitly calling Reset().
func Stop() {
+ clientMux.Lock()
+ defer clientMux.Unlock()
+
if client == nil {
// In case Stop() is called before Start()
return
}
- stopOnce.Do(func() {
- log.Debug("remoteconfig: gracefully stopping the client")
- client.stop <- struct{}{}
- select {
- case <-client.stop:
- log.Debug("remoteconfig: client stopped successfully")
- case <-time.After(time.Second):
- log.Debug("remoteconfig: client stopping timeout")
- }
- })
+ if !started {
+ // Return early if already stopped.
+ return
+ }
+ log.Debug("remoteconfig: gracefully stopping the client")
+ client.stop <- struct{}{}
+ select {
+ case <-client.stop:
+ log.Debug("remoteconfig: client stopped successfully")
+ case <-time.After(time.Second):
+ log.Debug("remoteconfig: client stopping timeout")
+ }
+ client = nil
+ started = false
}
// Reset destroys the client instance.
// To be used only in tests to reset the state of the client.
func Reset() {
+ clientMux.Lock()
+ defer clientMux.Unlock()
+
client = nil
- startOnce = sync.Once{}
- stopOnce = sync.Once{}
+ started = false
}
func (c *Client) updateState() {
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/contribs_generated.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/contribs_generated.go
index f6f8f77f3..7fcc2d78b 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/contribs_generated.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/contribs_generated.go
@@ -212,6 +212,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/aws/aws-sdk-go-v2/service/sts",
"github.com/aws/aws-xray-sdk-go/v2",
"github.com/aws/smithy-go",
+ "github.com/bahlo/generic-list-go",
"github.com/beorn7/perks",
"github.com/bgentry/speakeasy",
"github.com/bitly/go-hostpool",
@@ -221,6 +222,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/bsm/ginkgo/v2",
"github.com/bsm/gomega",
"github.com/buger/goterm",
+ "github.com/buger/jsonparser",
"github.com/bytedance/sonic",
"github.com/bytedance/sonic/loader",
"github.com/cenkalti/backoff/v3",
@@ -361,6 +363,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/gogo/protobuf",
"github.com/golang-jwt/jwt",
"github.com/golang-jwt/jwt/v4",
+ "github.com/golang-jwt/jwt/v5",
"github.com/golang-sql/civil",
"github.com/golang-sql/sqlexp",
"github.com/golang/glog",
@@ -376,6 +379,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/google/go-cmp",
"github.com/google/go-pkcs11",
"github.com/google/gofuzz",
+ "github.com/google/jsonschema-go",
"github.com/google/martian/v3",
"github.com/google/pprof",
"github.com/google/s2a-go",
@@ -503,6 +507,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/lyft/protoc-gen-star/v2",
"github.com/magiconair/properties",
"github.com/mailru/easyjson",
+ "github.com/mark3labs/mcp-go",
"github.com/matryer/moq",
"github.com/mattn/go-colorable",
"github.com/mattn/go-isatty",
@@ -536,6 +541,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/moby/sys/user",
"github.com/moby/sys/userns",
"github.com/moby/term",
+ "github.com/modelcontextprotocol/go-sdk",
"github.com/modern-go/concurrent",
"github.com/modern-go/reflect2",
"github.com/modocache/gover",
@@ -658,7 +664,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/uptrace/bun",
"github.com/uptrace/bun/dialect/sqlitedialect",
"github.com/urfave/cli",
- "github.com/urfave/cli/v2",
+ "github.com/urfave/cli/v3",
"github.com/urfave/negroni",
"github.com/valkey-io/valkey-go",
"github.com/valyala/bytebufferpool",
@@ -673,6 +679,7 @@ func generatedThirdPartyLibraries() []string {
"github.com/vmihailenco/msgpack/v5",
"github.com/vmihailenco/tagparser",
"github.com/vmihailenco/tagparser/v2",
+ "github.com/wk8/go-ordered-map/v2",
"github.com/x448/float16",
"github.com/xdg-go/pbkdf2",
"github.com/xdg-go/scram",
@@ -680,7 +687,6 @@ func generatedThirdPartyLibraries() []string {
"github.com/xeipuuv/gojsonpointer",
"github.com/xeipuuv/gojsonreference",
"github.com/xeipuuv/gojsonschema",
- "github.com/xrash/smetrics",
"github.com/yosida95/uritemplate/v3",
"github.com/youmark/pkcs8",
"github.com/yuin/goldmark",
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace.go
index c7536d0b3..94c5c87ff 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/stacktrace.go
@@ -10,7 +10,6 @@ package stacktrace
import (
"errors"
- "regexp"
"runtime"
"slices"
"strconv"
@@ -179,28 +178,71 @@ func (q *queue[T]) Remove() T {
return item
}
-var symbolRegex = regexp.MustCompile(`^(([^(]+/)?([^(/.]+)?)(\.\(([^/)]+)\))?\.([^/()]+)$`)
-
-// parseSymbol parses a symbol name into its package, receiver and function
-// ex: github.com/DataDog/dd-trace-go/v2/internal/stacktrace.(*Event).NewException
-// -> package: github.com/DataDog/dd-trace-go/v2/internal/stacktrace
-// -> receiver: *Event
-// -> function: NewException
+// parseSymbol parses a symbol name into its package, receiver and function using
+// zero-allocation string operations. This is a hot path called once per stack frame.
+//
+// Handles various Go symbol formats:
+// - Simple function: pkg.Function
+// - Method with receiver: pkg.(*Type).Method or pkg.(Type).Method
+// - Lambda/closure: pkg.Function.func1 or pkg.(*Type).Method.func1
+// - Generics: pkg.(*Type[...]).Method or pkg.Function[...]
+//
+// Examples:
+//
+// github.com/DataDog/dd-trace-go/v2/internal/stacktrace.(*Event).NewException
+// -> package: github.com/DataDog/dd-trace-go/v2/internal/stacktrace
+// -> receiver: *Event
+// -> function: NewException
+// github.com/DataDog/dd-trace-go/v2/internal/stacktrace.TestFunc.func1
+// -> package: github.com/DataDog/dd-trace-go/v2/internal/stacktrace
+// -> receiver: ""
+// -> function: TestFunc.func1
func parseSymbol(name string) symbol {
- matches := symbolRegex.FindStringSubmatch(name)
- if len(matches) != 7 {
- log.Error("Failed to parse symbol for stacktrace: %s", name)
- return symbol{
- Package: "",
- Receiver: "",
- Function: "",
+ // Check for receiver first: pkg.(*Type) or pkg.(Type)
+ // Look for ".(" which marks the start of a receiver
+ if idx := strings.Index(name, ".("); idx != -1 {
+ // Find the closing paren of the receiver
+ receiverEnd := strings.IndexByte(name[idx+2:], ')')
+ if receiverEnd != -1 {
+ pkg := name[:idx]
+ receiver := name[idx+2 : idx+2+receiverEnd]
+ // Everything after ")." is the function (which may contain dots for lambdas)
+ fn := name[idx+2+receiverEnd+2:] // +2 for ")."
+ return symbol{
+ Package: pkg,
+ Receiver: receiver,
+ Function: fn,
+ }
}
}
+ // No receiver case: need to find where package ends and function begins
+ // Package path ends at the last '/' followed by a segment before first '.'
+ // Examples:
+ // "pkg.Function" -> pkg: "pkg", fn: "Function"
+ // "pkg.Function.func1" -> pkg: "pkg", fn: "Function.func1"
+ // "github.com/org/pkg.Function" -> pkg: "github.com/org/pkg", fn: "Function"
+
+ // Find the last slash to identify where the package name starts
+ lastSlash := strings.LastIndexByte(name, '/')
+
+ // Find the first dot after the last slash (or from the beginning if no slash)
+ searchStart := 0
+ if lastSlash != -1 {
+ searchStart = lastSlash + 1
+ }
+
+ firstDotAfterSlash := strings.IndexByte(name[searchStart:], '.')
+ if firstDotAfterSlash == -1 {
+ // No dots after last slash, the whole thing is the function name
+ return symbol{Function: name}
+ }
+
+ // Package ends at this dot, function starts after it
+ pkgEnd := searchStart + firstDotAfterSlash
return symbol{
- Package: matches[1],
- Receiver: matches[5],
- Function: matches[6],
+ Package: name[:pkgEnd],
+ Function: name[pkgEnd+1:], // Everything after the dot, including nested dots for lambdas
}
}
@@ -218,6 +260,20 @@ func SkipAndCapture(skip int) StackTrace {
}).capture()
}
+// SkipAndCaptureWithInternalFrames creates a new stack trace from the current call stack without filtering internal frames.
+// This is useful for tracer span error stacktraces where we want to capture all frames.
+func SkipAndCaptureWithInternalFrames(depth int, skip int) StackTrace {
+ // Use default depth if not specified
+ if depth == 0 {
+ depth = defaultMaxDepth
+ }
+ return iterator(skip, depth, frameOptions{
+ skipInternalFrames: false,
+ redactCustomerFrames: false,
+ internalPackagePrefixes: nil,
+ }).capture()
+}
+
// CaptureRaw captures only program counters without symbolication.
// This is significantly faster than full capture as it avoids runtime.CallersFrames
// and symbol parsing. The skip parameter determines how many frames to skip from
@@ -273,18 +329,18 @@ func (r RawStackTrace) SymbolicateWithRedaction() StackTrace {
// capture extracts frames from an iterator using the same algorithm as capture
func (iter *framesIterator) capture() StackTrace {
- stack := make([]StackFrame, iter.cacheSize)
+ stack := make([]StackFrame, iter.maxDepth)
nbStoredFrames := 0
- topFramesQueue := newQueue[StackFrame](defaultTopFrameDepth)
+ topFramesQueue := newQueue[StackFrame](iter.topFrameDepth)
// We have to make sure we don't store more than maxDepth frames
// if there is more than maxDepth frames, we get X frames from the bottom of the stack and Y from the top
for frame, ok := iter.Next(); ok; frame, ok = iter.Next() {
// we reach the top frames: start to use the queue
- if nbStoredFrames >= defaultMaxDepth-defaultTopFrameDepth {
+ if nbStoredFrames >= iter.maxDepth-iter.topFrameDepth {
topFramesQueue.Add(frame)
// queue is full, remove the oldest frame
- if topFramesQueue.Length() > defaultTopFrameDepth {
+ if topFramesQueue.Length() > iter.topFrameDepth {
topFramesQueue.Remove()
}
continue
@@ -318,40 +374,54 @@ type frameOptions struct {
// IMPORTANT: This iterator is NOT thread-safe and should only be used within a single goroutine.
// Each call to Capture/SkipAndCapture/CaptureWithRedaction creates a new iterator instance.
type framesIterator struct {
- frames *queue[runtime.Frame]
- frameOpts frameOptions
- rawPCs []uintptr
- cache []uintptr
- cacheSize int
- cacheDepth int
- currDepth int
- useRawPCs bool
+ frames *queue[runtime.Frame]
+ frameOpts frameOptions
+ rawPCs []uintptr
+ cache []uintptr
+ cacheSize int
+ cacheDepth int
+ currDepth int
+ useRawPCs bool
+ maxDepth int
+ topFrameDepth int
}
-func iterator(skip, cacheSize int, opts frameOptions) *framesIterator {
+func iterator(skip, maxDepth int, opts frameOptions) *framesIterator {
+ topFrameDepth := maxDepth / 4
+ if topFrameDepth < 1 {
+ topFrameDepth = 1
+ }
return &framesIterator{
- frameOpts: opts,
- frames: newQueue[runtime.Frame](cacheSize + 4),
- cache: make([]uintptr, cacheSize),
- cacheSize: cacheSize,
- cacheDepth: skip,
- currDepth: 0,
+ frameOpts: opts,
+ frames: newQueue[runtime.Frame](maxDepth + 4),
+ cache: make([]uintptr, maxDepth),
+ cacheSize: maxDepth,
+ cacheDepth: skip,
+ currDepth: 0,
+ maxDepth: maxDepth,
+ topFrameDepth: topFrameDepth,
}
}
// iteratorFromRaw creates an iterator from pre-captured PCs for deferred symbolication
func iteratorFromRaw(pcs []uintptr, opts frameOptions) *framesIterator {
- cacheSize := min(len(pcs), defaultMaxDepth)
+ maxDepth := min(len(pcs), defaultMaxDepth)
+ topFrameDepth := maxDepth / 4
+ if topFrameDepth < 1 {
+ topFrameDepth = 1
+ }
return &framesIterator{
- frameOpts: opts,
- frames: newQueue[runtime.Frame](cacheSize + 4),
- cache: make([]uintptr, cacheSize),
- cacheSize: cacheSize,
- cacheDepth: 0,
- useRawPCs: true,
- rawPCs: pcs,
- currDepth: 0,
+ frameOpts: opts,
+ frames: newQueue[runtime.Frame](maxDepth + 4),
+ cache: make([]uintptr, maxDepth),
+ cacheSize: maxDepth,
+ cacheDepth: 0,
+ useRawPCs: true,
+ rawPCs: pcs,
+ currDepth: 0,
+ maxDepth: maxDepth,
+ topFrameDepth: topFrameDepth,
}
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/trie.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/trie.go
index 6fda07f6a..3ccf4166c 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/trie.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/stacktrace/trie.go
@@ -5,149 +5,17 @@
package stacktrace
-import (
- "sync"
-)
-
-// prefixTrie is a thread-safe trie data structure optimized for prefix matching.
-// It's designed for high-performance concurrent read operations with occasional writes.
-//
-// Memory vs Performance Trade-offs:
-// - Slightly higher initial memory overhead for data structure vs slice
-// - Zero allocations during lookups (same as slice)
-// - O(m) lookup time where m=string length (vs O(n) where n=prefix count)
-// - Performance varies: slower for early matches, much faster for no-match scenarios
-type prefixTrie struct {
- root *trieNode
- mu sync.RWMutex
-}
-
-// trieNode represents a single node in the trie
-type trieNode struct {
- children map[rune]*trieNode
- isEnd bool // true if this node represents the end of a prefix
-}
-
-// newPrefixTrie creates a new empty PrefixTrie
-func newPrefixTrie() *prefixTrie {
- return &prefixTrie{
- root: &trieNode{
- children: make(map[rune]*trieNode),
- },
- }
-}
-
-// Insert adds a prefix to the trie
-func (t *prefixTrie) Insert(prefix string) {
- if prefix == "" {
- return
- }
-
- t.mu.Lock()
- defer t.mu.Unlock()
-
- node := t.root
- for _, ch := range prefix {
- if node.children[ch] == nil {
- node.children[ch] = &trieNode{
- children: make(map[rune]*trieNode),
- }
- }
- node = node.children[ch]
- }
- node.isEnd = true
-}
-
-// HasPrefix checks if the given string has any of the prefixes stored in the trie.
-// Returns true if any prefix in the trie is a prefix of the input string.
-func (t *prefixTrie) HasPrefix(s string) (found bool) {
- if s == "" {
- return false
- }
-
- t.mu.RLock()
- defer t.mu.RUnlock()
-
- node := t.root
- for _, ch := range s {
- if node.isEnd {
- return true
- }
-
- node = node.children[ch]
- if node == nil {
- return false
- }
- }
-
- return node.isEnd
-}
-
-// InsertAll adds multiple prefixes to the trie in a single operation
-func (t *prefixTrie) InsertAll(prefixes []string) {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- for _, prefix := range prefixes {
- if prefix == "" {
- continue
- }
-
- node := t.root
- for _, ch := range prefix {
- if node.children[ch] == nil {
- node.children[ch] = &trieNode{
- children: make(map[rune]*trieNode),
- }
- }
- node = node.children[ch]
- }
- node.isEnd = true
- }
-}
-
-// Size returns the number of prefixes stored in the trie
-func (t *prefixTrie) Size() int {
- t.mu.RLock()
- defer t.mu.RUnlock()
-
- return t.countPrefixes(t.root)
-}
-
-// countPrefixes recursively counts the number of complete prefixes in the trie
-func (t *prefixTrie) countPrefixes(node *trieNode) int {
- if node == nil {
- return 0
- }
-
- count := 0
- if node.isEnd {
- count = 1
- }
-
- for _, child := range node.children {
- count += t.countPrefixes(child)
- }
-
- return count
-}
-
-// Clear removes all prefixes from the trie
-func (t *prefixTrie) Clear() {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- t.root = &trieNode{
- children: make(map[rune]*trieNode),
- }
-}
-
// segmentPrefixTrie is a path segment-based trie optimized for "/" delimited paths.
// It stores path segments (e.g., "github.com", "DataDog") as nodes instead of individual characters,
// providing better memory efficiency and potentially faster lookups for module paths.
+//
+// Concurrency: This trie follows a write-once-read-many (WORM) pattern where all writes
+// occur during package initialization (init function) before any concurrent access begins.
+// After initialization, the trie is effectively immutable and can be safely read by multiple
+// goroutines without synchronization.
type segmentPrefixTrie struct {
root *segmentTrieNode
- mu sync.RWMutex
+ // No mutex needed - structure is immutable after init()
}
// segmentTrieNode represents a single path segment node in the trie
@@ -165,15 +33,13 @@ func newSegmentPrefixTrie() *segmentPrefixTrie {
}
}
-// Insert adds a prefix to the segment trie
+// Insert adds a prefix to the segment trie.
+// This method should only be called during initialization before any concurrent access.
func (t *segmentPrefixTrie) Insert(prefix string) {
if prefix == "" {
return
}
- t.mu.Lock()
- defer t.mu.Unlock()
-
node := t.root
start := 0
@@ -197,14 +63,12 @@ func (t *segmentPrefixTrie) Insert(prefix string) {
}
// HasPrefix checks if the given string has any of the prefixes stored in the segment trie.
+// Safe for concurrent use after initialization.
func (t *segmentPrefixTrie) HasPrefix(s string) (found bool) {
if s == "" {
return false
}
- t.mu.RLock()
- defer t.mu.RUnlock()
-
node := t.root
start := 0
@@ -230,11 +94,9 @@ func (t *segmentPrefixTrie) HasPrefix(s string) (found bool) {
return node.isEnd
}
-// InsertAll adds multiple prefixes to the segment trie in a single operation
+// InsertAll adds multiple prefixes to the segment trie in a single operation.
+// This method should only be called during initialization before any concurrent access.
func (t *segmentPrefixTrie) InsertAll(prefixes []string) {
- t.mu.Lock()
- defer t.mu.Unlock()
-
for _, prefix := range prefixes {
if prefix == "" {
continue
@@ -263,11 +125,9 @@ func (t *segmentPrefixTrie) InsertAll(prefixes []string) {
}
}
-// Size returns the number of prefixes stored in the segment trie
+// Size returns the number of prefixes stored in the segment trie.
+// Safe for concurrent use after initialization.
func (t *segmentPrefixTrie) Size() int {
- t.mu.RLock()
- defer t.mu.RUnlock()
-
return t.countSegmentPrefixes(t.root)
}
@@ -289,11 +149,9 @@ func (t *segmentPrefixTrie) countSegmentPrefixes(node *segmentTrieNode) int {
return count
}
-// Clear removes all prefixes from the segment trie
+// Clear removes all prefixes from the segment trie.
+// This method should only be called during initialization before any concurrent access.
func (t *segmentPrefixTrie) Clear() {
- t.mu.Lock()
- defer t.mu.Unlock()
-
t.root = &segmentTrieNode{
children: make(map[string]*segmentTrieNode),
}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/writer.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/writer.go
index 7a4162ea6..4374f61e0 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/writer.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/telemetry/internal/writer.go
@@ -11,7 +11,6 @@ import (
"errors"
"fmt"
"io"
- "net"
"net/http"
"os"
"runtime"
@@ -29,25 +28,6 @@ import (
"github.com/DataDog/dd-trace-go/v2/internal/version"
)
-// We copy the transport to avoid using the default one, as it might be
-// augmented with tracing and we don't want these calls to be recorded.
-// See https://golang.org/pkg/net/http/#DefaultTransport .
-var defaultHTTPClient = &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).DialContext,
- ForceAttemptHTTP2: true,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- },
- Timeout: 5 * time.Second,
-}
-
func newBody(config TracerConfig, debugMode bool) *transport.Body {
osHostname, err := os.Hostname()
if err != nil {
@@ -130,7 +110,7 @@ func NewWriter(config WriterConfig) (Writer, error) {
}
if config.HTTPClient == nil {
- config.HTTPClient = defaultHTTPClient
+ config.HTTPClient = internal.DefaultHTTPClient(5*time.Second, true)
}
// Don't allow the client to have a timeout higher than 5 seconds
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/uds.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/uds.go
index 4f549490b..db1077d82 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/uds.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/uds.go
@@ -6,9 +6,13 @@
package internal
import (
+ "context"
"fmt"
+ "net"
+ "net/http"
"net/url"
"strings"
+ "time"
)
func UnixDataSocketURL(path string) *url.URL {
@@ -17,3 +21,23 @@ func UnixDataSocketURL(path string) *url.URL {
Host: fmt.Sprintf("UDS_%s", strings.NewReplacer(":", "_", "/", "_", `\`, "_").Replace(path)),
}
}
+
+// UDSClient returns a new http.Client which connects using the given UDS socket path.
+func UDSClient(socketPath string, timeout time.Duration) *http.Client {
+ return &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return DefaultDialer(timeout).DialContext(ctx, "unix", (&net.UnixAddr{
+ Name: socketPath,
+ Net: "unix",
+ }).String())
+ },
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ },
+ Timeout: timeout,
+ }
+}
diff --git a/vendor/github.com/DataDog/dd-trace-go/v2/internal/version/version.go b/vendor/github.com/DataDog/dd-trace-go/v2/internal/version/version.go
index bdd2acf27..55b52462f 100644
--- a/vendor/github.com/DataDog/dd-trace-go/v2/internal/version/version.go
+++ b/vendor/github.com/DataDog/dd-trace-go/v2/internal/version/version.go
@@ -11,13 +11,13 @@ import (
"strings"
"sync"
- "github.com/Masterminds/semver/v3"
+ "golang.org/x/mod/semver"
)
// Tag specifies the current release tag. It needs to be manually
// updated. A test checks that the value of Tag never points to a
// git tag that is older than HEAD.
-var Tag = "v2.4.0"
+var Tag = "v2.5.0"
type v1version struct {
Transitional bool
@@ -94,24 +94,42 @@ type version struct {
}
func parseVersion(value string) version {
- var (
- parsedVersion = semver.MustParse(value)
- v = version{
- Major: int(parsedVersion.Major()),
- Minor: int(parsedVersion.Minor()),
- Patch: int(parsedVersion.Patch()),
- }
- )
+ var v version
+
+ if !semver.IsValid(value) {
+ // This shouldn't happen, but it must be handled.
+ // `golang.org/x/mod/semver` doesn't expose the parsed parts of the version.
+ return v
+ }
+
+ i := strings.Index(value, ".")
+ v.Major, _ = strconv.Atoi(value[1:i])
+
+ value = value[i+1:]
+ i = strings.Index(value, ".")
+ v.Minor, _ = strconv.Atoi(value[:i])
- pr := parsedVersion.Prerelease()
- if pr == "" || pr == "dev" {
+ value = value[i+1:]
+ i = strings.Index(value, "-")
+ if i == -1 {
+ v.Patch, _ = strconv.Atoi(value)
return v
}
- split := strings.Split(pr, ".")
- if len(split) > 1 {
- v.RC, _ = strconv.Atoi(split[1])
+ v.Patch, _ = strconv.Atoi(value[:i])
+
+ value = value[i+1:]
+ i = strings.Index(value, ".")
+ if i == -1 {
+ // Prerelease doesn't have a specific number.
+ return v
+ }
+
+ value = value[i+1:]
+ if len(value) == 0 {
+ return v
}
+ v.RC, _ = strconv.Atoi(value)
return v
}
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/libddwaf.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/libddwaf.go
index 2eabee7ea..b2b0ed364 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/libddwaf.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/libddwaf.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build (linux || darwin) && (amd64 || arm64) && !go1.27 && !datadog.no_waf && (cgo || appsec)
package bindings
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl.go
index 0cba89066..b9fedfbf2 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build (linux || darwin) && (amd64 || arm64) && !go1.27 && !datadog.no_waf && (cgo || appsec)
package bindings
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl_unsupported.go
index 9babd7037..b52e38f15 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl_unsupported.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/bindings/waf_dl_unsupported.go
@@ -4,7 +4,7 @@
// Copyright 2016-present Datadog, Inc.
// Build when the target OS or architecture are not supported
-//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.26 || datadog.no_waf || (!cgo && !appsec)
+//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.27 || datadog.no_waf || (!cgo && !appsec)
package bindings
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/.version b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/.version
index 72f3c1dac..ff6d09b31 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/.version
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/.version
@@ -1 +1 @@
-1.29.0
\ No newline at end of file
+1.30.0
\ No newline at end of file
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_darwin.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_darwin.go
index b6c897202..6c6b2e53c 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_darwin.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_darwin.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build darwin && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build darwin && (amd64 || arm64) && !go1.27 && !datadog.no_waf && (cgo || appsec)
package lib
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_linux.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_linux.go
index d9f38178d..dca0dc580 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_linux.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/dump_waf_linux.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux && (amd64 || arm64) && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build linux && (amd64 || arm64) && !go1.27 && !datadog.no_waf && (cgo || appsec)
package lib
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_amd64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_amd64.go
index 52d7511b3..cef1d0090 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_amd64.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_amd64.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build darwin && amd64 && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build darwin && amd64 && !go1.27 && !datadog.no_waf && (cgo || appsec)
package lib
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_arm64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_arm64.go
index cc8498738..1767924a5 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_arm64.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_darwin_arm64.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build darwin && arm64 && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build darwin && arm64 && !go1.27 && !datadog.no_waf && (cgo || appsec)
package lib
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_amd64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_amd64.go
index 4cc9b52c7..469fdc54e 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_amd64.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_amd64.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux && amd64 && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build linux && amd64 && !go1.27 && !datadog.no_waf && (cgo || appsec)
package lib
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_arm64.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_arm64.go
index 7d2d299ae..4d94ce43a 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_arm64.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/lib_linux_arm64.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build linux && arm64 && !go1.26 && !datadog.no_waf && (cgo || appsec)
+//go:build linux && arm64 && !go1.27 && !datadog.no_waf && (cgo || appsec)
package lib
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-amd64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-amd64.dylib.gz
index 6c399f29e..9eb0f93f3 100644
Binary files a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-amd64.dylib.gz and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-amd64.dylib.gz differ
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-arm64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-arm64.dylib.gz
index fbd1fb729..a83994040 100644
Binary files a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-arm64.dylib.gz and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-darwin-arm64.dylib.gz differ
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-amd64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-amd64.so.gz
index e75d10bc7..e863b8b7e 100644
Binary files a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-amd64.so.gz and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-amd64.so.gz differ
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-arm64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-arm64.so.gz
index 5c15a29ff..1e5fe8146 100644
Binary files a/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-arm64.so.gz and b/vendor/github.com/DataDog/go-libddwaf/v4/internal/lib/libddwaf-linux-arm64.so.gz differ
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_cgo.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_cgo.go
index 8f0715174..ec76ef4d3 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_cgo.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_cgo.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && cgo
+//go:build (linux || darwin) && (amd64 || arm64) && !go1.27 && !datadog.no_waf && cgo
package log
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_purego.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_purego.go
index 7bbc92acf..67a7915b7 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_purego.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_purego.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build (linux || darwin) && (amd64 || arm64) && !go1.26 && !datadog.no_waf && !cgo && appsec
+//go:build (linux || darwin) && (amd64 || arm64) && !go1.27 && !datadog.no_waf && !cgo && appsec
package log
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_unsupported.go
index f548fec11..0c49226c5 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_unsupported.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/log/log_unsupported.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.26 || datadog.no_waf || (!cgo && !appsec)
+//go:build (!linux && !darwin) || (!amd64 && !arm64) || go1.27 || datadog.no_waf || (!cgo && !appsec)
package log
diff --git a/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_go.go b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_go.go
index 7e8d74524..e0621eb50 100644
--- a/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_go.go
+++ b/vendor/github.com/DataDog/go-libddwaf/v4/internal/support/waf_unsupported_go.go
@@ -4,7 +4,7 @@
// Copyright 2016-present Datadog, Inc.
// Unsupported Go versions (>=)
-//go:build go1.26
+//go:build go1.27
package support
diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore
deleted file mode 100644
index 6b061e617..000000000
--- a/vendor/github.com/Masterminds/semver/v3/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-_fuzz/
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
deleted file mode 100644
index fbc633259..000000000
--- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-run:
- deadline: 2m
-
-linters:
- disable-all: true
- enable:
- - misspell
- - govet
- - staticcheck
- - errcheck
- - unparam
- - ineffassign
- - nakedret
- - gocyclo
- - dupl
- - goimports
- - revive
- - gosec
- - gosimple
- - typecheck
- - unused
-
-linters-settings:
- gofmt:
- simplify: true
- dupl:
- threshold: 600
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
deleted file mode 100644
index fabe5e43d..000000000
--- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
+++ /dev/null
@@ -1,268 +0,0 @@
-# Changelog
-
-## 3.4.0 (2025-06-27)
-
-### Added
-
-- #268: Added property to Constraints to include prereleases for Check and Validate
-
-### Changed
-
-- #263: Updated Go testing for 1.24, 1.23, and 1.22
-- #269: Updated the error message handling for message case and wrapping errors
-- #266: Restore the ability to have leading 0's when parsing with NewVersion.
- Opt-out of this by setting CoerceNewVersion to false.
-
-### Fixed
-
-- #257: Fixed the CodeQL link (thanks @dmitris)
-- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out
- of this by setting DetailedNewVersionErrors to false for faster performance.
-- #267: Handle pre-releases for an "and" group if one constraint includes them
-
-## 3.3.1 (2024-11-19)
-
-### Fixed
-
-- #253: Fix for allowing some version that were invalid
-
-## 3.3.0 (2024-08-27)
-
-### Added
-
-- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
-- #213: nil version equality checking (thanks @KnutZuidema)
-
-### Changed
-
-- #241: Simplify StrictNewVersion parsing (thanks @grosser)
-- Testing support up through Go 1.23
-- Minimum version set to 1.21 as this is what's tested now
-- Fuzz testing now supports caching
-
-## 3.2.1 (2023-04-10)
-
-### Changed
-
-- #198: Improved testing around pre-release names
-- #200: Improved code scanning with addition of CodeQL
-- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
-- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
-- #203: Docs updated for security details
-
-### Fixed
-
-- #199: Fixed issue with range transformations
-
-## 3.2.0 (2022-11-28)
-
-### Added
-
-- #190: Added text marshaling and unmarshaling
-- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
-- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
-- #179: Added New() version constructor (thanks @kazhuravlev)
-
-### Changed
-
-- #182/#183: Updated CI testing setup
-
-### Fixed
-
-- #186: Fixing issue where validation of constraint section gave false positives
-- #176: Fix constraints check with *-0 (thanks @mtt0)
-- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni)
-- #161: Fixed godoc (thanks @afirth)
-
-## 3.1.1 (2020-11-23)
-
-### Fixed
-
-- #158: Fixed issue with generated regex operation order that could cause problem
-
-## 3.1.0 (2020-04-15)
-
-### Added
-
-- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
-
-### Changed
-
-- #148: More accurate validation messages on constraints
-
-## 3.0.3 (2019-12-13)
-
-### Fixed
-
-- #141: Fixed issue with <= comparison
-
-## 3.0.2 (2019-11-14)
-
-### Fixed
-
-- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
-
-## 3.0.1 (2019-09-13)
-
-### Fixed
-
-- #125: Fixes issue with module path for v3
-
-## 3.0.0 (2019-09-12)
-
-This is a major release of the semver package which includes API changes. The Go
-API is compatible with ^1. The Go API was not changed because many people are using
-`go get` without Go modules for their applications and API breaking changes cause
-errors which we have or would need to support.
-
-The changes in this release are the handling based on the data passed into the
-functions. These are described in the added and changed sections below.
-
-### Added
-
-- StrictNewVersion function. This is similar to NewVersion but will return an
- error if the version passed in is not a strict semantic version. For example,
- 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
- speaking semantic versions. This function is faster, performs fewer operations,
- and uses fewer allocations than NewVersion.
-- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
- The Makefile contains the operations used. For more information on you can start
- on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
-- Now using Go modules
-
-### Changed
-
-- NewVersion has proper prerelease and metadata validation with error messages
- to signal an issue with either of them
-- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
- version is >=1 the ^ ranges works the same as v1. For major versions of 0 the
- rules have changed. The minor version is treated as the stable version unless
- a patch is specified and then it is equivalent to =. One difference from npm/js
- is that prereleases there are only to a specific version (e.g. 1.2.3).
- Prereleases here look over multiple versions and follow semantic version
- ordering rules. This pattern now follows along with the expected and requested
- handling of this packaged by numerous users.
-
-## 1.5.0 (2019-09-11)
-
-### Added
-
-- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
-
-### Changed
-
-- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
-- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
-- #72: Adding docs comment pointing to vert for a cli
-- #71: Update the docs on pre-release comparator handling
-- #89: Test with new go versions (thanks @thedevsaddam)
-- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
-
-### Fixed
-
-- #78: Fix unchecked error in example code (thanks @ravron)
-- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
-- #97: Fixed copyright file for proper display on GitHub
-- #107: Fix handling prerelease when sorting alphanum and num
-- #109: Fixed where Validate sometimes returns wrong message on error
-
-## 1.4.2 (2018-04-10)
-
-### Changed
-
-- #72: Updated the docs to point to vert for a console appliaction
-- #71: Update the docs on pre-release comparator handling
-
-### Fixed
-
-- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
-
-## 1.4.1 (2018-04-02)
-
-### Fixed
-
-- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
-
-## 1.4.0 (2017-10-04)
-
-### Changed
-
-- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
-
-## 1.3.1 (2017-07-10)
-
-### Fixed
-
-- Fixed #57: number comparisons in prerelease sometimes inaccurate
-
-## 1.3.0 (2017-05-02)
-
-### Added
-
-- #45: Added json (un)marshaling support (thanks @mh-cbon)
-- Stability marker. See https://masterminds.github.io/stability/
-
-### Fixed
-
-- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
-
-### Changed
-
-- #55: The godoc icon moved from png to svg
-
-## 1.2.3 (2017-04-03)
-
-### Fixed
-
-- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
-
-## Release 1.2.2 (2016-12-13)
-
-### Fixed
-
-- #34: Fixed issue where hyphen range was not working with pre-release parsing.
-
-## Release 1.2.1 (2016-11-28)
-
-### Fixed
-
-- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
- properly.
-
-## Release 1.2.0 (2016-11-04)
-
-### Added
-
-- #20: Added MustParse function for versions (thanks @adamreese)
-- #15: Added increment methods on versions (thanks @mh-cbon)
-
-### Fixed
-
-- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
- might not satisfy the intended compatibility. The change here ignores pre-releases
- on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
- constraint. For example, `^1.2.3` will ignore pre-releases while
- `^1.2.3-alpha` will include them.
-
-## Release 1.1.1 (2016-06-30)
-
-### Changed
-
-- Issue #9: Speed up version comparison performance (thanks @sdboyer)
-- Issue #8: Added benchmarks (thanks @sdboyer)
-- Updated Go Report Card URL to new location
-- Updated Readme to add code snippet formatting (thanks @mh-cbon)
-- Updating tagging to v[SemVer] structure for compatibility with other tools.
-
-## Release 1.1.0 (2016-03-11)
-
-- Issue #2: Implemented validation to provide reasons a versions failed a
- constraint.
-
-## Release 1.0.1 (2015-12-31)
-
-- Fixed #1: * constraint failing on valid versions.
-
-## Release 1.0.0 (2015-10-20)
-
-- Initial release
diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
deleted file mode 100644
index 9ff7da9c4..000000000
--- a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2014-2019, Matt Butcher and Matt Farina
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
deleted file mode 100644
index 9ca87a2c7..000000000
--- a/vendor/github.com/Masterminds/semver/v3/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-GOPATH=$(shell go env GOPATH)
-GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
-
-.PHONY: lint
-lint: $(GOLANGCI_LINT)
- @echo "==> Linting codebase"
- @$(GOLANGCI_LINT) run
-
-.PHONY: test
-test:
- @echo "==> Running tests"
- GO111MODULE=on go test -v
-
-.PHONY: test-cover
-test-cover:
- @echo "==> Running Tests with coverage"
- GO111MODULE=on go test -cover .
-
-.PHONY: fuzz
-fuzz:
- @echo "==> Running Fuzz Tests"
- go env GOCACHE
- go test -fuzz=FuzzNewVersion -fuzztime=15s .
- go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
- go test -fuzz=FuzzNewConstraint -fuzztime=15s .
-
-$(GOLANGCI_LINT):
- # Install golangci-lint. The configuration for it is in the .golangci.yml
- # file in the root of the repository
- echo ${GOPATH}
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
deleted file mode 100644
index 2f56c676a..000000000
--- a/vendor/github.com/Masterminds/semver/v3/README.md
+++ /dev/null
@@ -1,274 +0,0 @@
-# SemVer
-
-The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
-
-* Parse semantic versions
-* Sort semantic versions
-* Check if a semantic version fits within a set of constraints
-* Optionally work with a `v` prefix
-
-[](https://masterminds.github.io/stability/active.html)
-[](https://github.com/Masterminds/semver/actions)
-[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
-[](https://goreportcard.com/report/github.com/Masterminds/semver)
-
-## Package Versions
-
-Note, import `github.com/Masterminds/semver/v3` to use the latest version.
-
-There are three major versions fo the `semver` package.
-
-* 3.x.x is the stable and active version. This version is focused on constraint
- compatibility for range handling in other tools from other languages. It has
- a similar API to the v1 releases. The development of this version is on the master
- branch. The documentation for this version is below.
-* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
- no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
- There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
-* 1.x.x is the original release. It is no longer maintained. You should use the
- v3 release instead. You can read the documentation for the 1.x.x release
- [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
-
-## Parsing Semantic Versions
-
-There are two functions that can parse semantic versions. The `StrictNewVersion`
-function only parses valid version 2 semantic versions as outlined in the
-specification. The `NewVersion` function attempts to coerce a version into a
-semantic version and parse it. For example, if there is a leading v or a version
-listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
-semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
-that can be sorted, compared, and used in constraints.
-
-When parsing a version an error is returned if there is an issue parsing the
-version. For example,
-
- v, err := semver.NewVersion("1.2.3-beta.1+build345")
-
-The version object has methods to get the parts of the version, compare it to
-other versions, convert the version back into a string, and get the original
-string. Getting the original string is useful if the semantic version was coerced
-into a valid form.
-
-There are package level variables that affect how `NewVersion` handles parsing.
-
-- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant
- versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch
- part. This enables the use of CalVer in versions even when not compliant with SemVer.
- When set to `false` less coercion work is done.
-- `DetailedNewVersionErrors` provides more detailed errors. It only has an affect when
- `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
- it can provide some more insight into why a version is invalid. Setting
- `DetailedNewVersionErrors` to `false` is faster on performance but provides less
- detailed error messages if a version fails to parse.
-
-## Sorting Semantic Versions
-
-A set of versions can be sorted using the `sort` package from the standard library.
-For example,
-
-```go
-raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
-vs := make([]*semver.Version, len(raw))
-for i, r := range raw {
- v, err := semver.NewVersion(r)
- if err != nil {
- t.Errorf("Error parsing version: %s", err)
- }
-
- vs[i] = v
-}
-
-sort.Sort(semver.Collection(vs))
-```
-
-## Checking Version Constraints
-
-There are two methods for comparing versions. One uses comparison methods on
-`Version` instances and the other uses `Constraints`. There are some important
-differences to notes between these two methods of comparison.
-
-1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include pre-releases
- within the comparison. It will provide an answer that is valid with the
- comparison section of the spec at https://semver.org/#spec-item-11
-2. When constraint checking is used for checks or validation it will follow a
- different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering pre-releases to be invalid if the
- ranges does not include one. If you want to have it include pre-releases a
- simple solution is to include `-0` in your range.
-3. Constraint ranges can have some complex rules including the shorthand use of
- ~ and ^. For more details on those see the options below.
-
-There are differences between the two methods or checking versions because the
-comparison methods on `Version` follow the specification while comparison ranges
-are not part of the specification. Different packages and tools have taken it
-upon themselves to come up with range rules. This has resulted in differences.
-For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
-different pattern for ^. The comparison features in this package follow the
-npm/js and Cargo/Rust lead because applications using it have followed similar
-patters with their versions.
-
-Checking a version against version constraints is one of the most featureful
-parts of the package.
-
-```go
-c, err := semver.NewConstraint(">= 1.2.3")
-if err != nil {
- // Handle constraint not being parsable.
-}
-
-v, err := semver.NewVersion("1.3")
-if err != nil {
- // Handle version not being parsable.
-}
-// Check if the version meets the constraints. The variable a will be true.
-a := c.Check(v)
-```
-
-### Basic Comparisons
-
-There are two elements to the comparisons. First, a comparison string is a list
-of space or comma separated AND comparisons. These are then separated by || (OR)
-comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
-comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
-greater than or equal to 4.2.3.
-
-The basic comparisons are:
-
-* `=`: equal (aliased to no operator)
-* `!=`: not equal
-* `>`: greater than
-* `<`: less than
-* `>=`: greater than or equal to
-* `<=`: less than or equal to
-
-### Working With Prerelease Versions
-
-Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of pre-releases include
-development, alpha, beta, and release candidate releases. A pre-release may be
-a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, pre-releases come before their associated releases. In this
-example `1.2.3-beta.1 < 1.2.3`.
-
-According to the Semantic Version specification, pre-releases may not be
-API compliant with their release counterpart. It says,
-
-> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
-
-SemVer's comparisons using constraints without a pre-release comparator will skip
-pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
-
-The reason for the `0` as a pre-release version in the example comparison is
-because pre-releases can only contain ASCII alphanumerics and hyphens (along with
-`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
-spec. The lowest character is a `0` in ASCII sort order
-(see an [ASCII Table](http://www.asciitable.com/))
-
-Understanding ASCII sort ordering is important because A-Z comes before a-z. That
-means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
-sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
-the spec specifies.
-
-The `Constraints` instance returned from `semver.NewConstraint()` has a property
-`IncludePrerelease` that, when set to true, will return prerelease versions when calls
-to `Check()` and `Validate()` are made.
-
-### Hyphen Range Comparisons
-
-There are multiple methods to handle ranges and the first is hyphens ranges.
-These look like:
-
-* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
-* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
-
-Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
-parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
-
-### Wildcards In Comparisons
-
-The `x`, `X`, and `*` characters can be used as a wildcard character. This works
-for all comparison operators. When used on the `=` operator it falls
-back to the patch level comparison (see tilde below). For example,
-
-* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
-* `>= 1.2.x` is equivalent to `>= 1.2.0`
-* `<= 2.x` is equivalent to `< 3`
-* `*` is equivalent to `>= 0.0.0`
-
-### Tilde Range Comparisons (Patch)
-
-The tilde (`~`) comparison operator is for patch level ranges when a minor
-version is specified and major level changes when the minor number is missing.
-For example,
-
-* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
-* `~1` is equivalent to `>= 1, < 2`
-* `~2.3` is equivalent to `>= 2.3, < 2.4`
-* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
-* `~1.x` is equivalent to `>= 1, < 2`
-
-### Caret Range Comparisons (Major)
-
-The caret (`^`) comparison operator is for major level changes once a stable
-(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts
-as the API stability level. This is useful when comparisons of API versions as a
-major change is API breaking. For example,
-
-* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
-* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
-* `^2.3` is equivalent to `>= 2.3, < 3`
-* `^2.x` is equivalent to `>= 2.0.0, < 3`
-* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
-* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
-* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
-* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
-* `^0` is equivalent to `>=0.0.0 <1.0.0`
-
-## Validation
-
-In addition to testing a version against a constraint, a version can be validated
-against a constraint. When validation fails a slice of errors containing why a
-version didn't meet the constraint is returned. For example,
-
-```go
-c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
-if err != nil {
- // Handle constraint not being parseable.
-}
-
-v, err := semver.NewVersion("1.3")
-if err != nil {
- // Handle version not being parseable.
-}
-
-// Validate a version against a constraint.
-a, msgs := c.Validate(v)
-// a is false
-for _, m := range msgs {
- fmt.Println(m)
-
- // Loops over the errors which would read
- // "1.3 is greater than 1.2.3"
- // "1.3 is less than 1.4"
-}
-```
-
-## Contribute
-
-If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
-or [create a pull request](https://github.com/Masterminds/semver/pulls).
-
-## Security
-
-Security is an important consideration for this project. The project currently
-uses the following tools to help discover security issues:
-
-* [CodeQL](https://codeql.github.com)
-* [gosec](https://github.com/securego/gosec)
-* Daily Fuzz testing
-
-If you believe you have found a security vulnerability you can privately disclose
-it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md
deleted file mode 100644
index a30a66b1f..000000000
--- a/vendor/github.com/Masterminds/semver/v3/SECURITY.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Security Policy
-
-## Supported Versions
-
-The following versions of semver are currently supported:
-
-| Version | Supported |
-| ------- | ------------------ |
-| 3.x | :white_check_mark: |
-| 2.x | :x: |
-| 1.x | :x: |
-
-Fixes are only released for the latest minor version in the form of a patch release.
-
-## Reporting a Vulnerability
-
-You can privately disclose a vulnerability through GitHubs
-[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
-mechanism.
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go
deleted file mode 100644
index a78235895..000000000
--- a/vendor/github.com/Masterminds/semver/v3/collection.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package semver
-
-// Collection is a collection of Version instances and implements the sort
-// interface. See the sort package for more details.
-// https://golang.org/pkg/sort/
-type Collection []*Version
-
-// Len returns the length of a collection. The number of Version instances
-// on the slice.
-func (c Collection) Len() int {
- return len(c)
-}
-
-// Less is needed for the sort interface to compare two Version objects on the
-// slice. If checks if one is less than the other.
-func (c Collection) Less(i, j int) bool {
- return c[i].LessThan(c[j])
-}
-
-// Swap is needed for the sort interface to replace the Version objects
-// at two different positions in the slice.
-func (c Collection) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
deleted file mode 100644
index 8b7a10f83..000000000
--- a/vendor/github.com/Masterminds/semver/v3/constraints.go
+++ /dev/null
@@ -1,601 +0,0 @@
-package semver
-
-import (
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strings"
-)
-
-// Constraints is one or more constraint that a semantic version can be
-// checked against.
-type Constraints struct {
- constraints [][]*constraint
- containsPre []bool
-
- // IncludePrerelease specifies if pre-releases should be included in
- // the results. Note, if a constraint range has a prerelease than
- // prereleases will be included for that AND group even if this is
- // set to false.
- IncludePrerelease bool
-}
-
-// NewConstraint returns a Constraints instance that a Version instance can
-// be checked against. If there is a parse error it will be returned.
-func NewConstraint(c string) (*Constraints, error) {
-
- // Rewrite - ranges into a comparison operation.
- c = rewriteRange(c)
-
- ors := strings.Split(c, "||")
- lenors := len(ors)
- or := make([][]*constraint, lenors)
- hasPre := make([]bool, lenors)
- for k, v := range ors {
- // Validate the segment
- if !validConstraintRegex.MatchString(v) {
- return nil, fmt.Errorf("improper constraint: %s", v)
- }
-
- cs := findConstraintRegex.FindAllString(v, -1)
- if cs == nil {
- cs = append(cs, v)
- }
- result := make([]*constraint, len(cs))
- for i, s := range cs {
- pc, err := parseConstraint(s)
- if err != nil {
- return nil, err
- }
-
- // If one of the constraints has a prerelease record this.
- // This information is used when checking all in an "and"
- // group to ensure they all check for prereleases.
- if pc.con.pre != "" {
- hasPre[k] = true
- }
-
- result[i] = pc
- }
- or[k] = result
- }
-
- o := &Constraints{
- constraints: or,
- containsPre: hasPre,
- }
- return o, nil
-}
-
-// Check tests if a version satisfies the constraints.
-func (cs Constraints) Check(v *Version) bool {
- // TODO(mattfarina): For v4 of this library consolidate the Check and Validate
- // functions as the underlying functions make that possible now.
- // loop over the ORs and check the inner ANDs
- for i, o := range cs.constraints {
- joy := true
- for _, c := range o {
- if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check {
- joy = false
- break
- }
- }
-
- if joy {
- return true
- }
- }
-
- return false
-}
-
-// Validate checks if a version satisfies a constraint. If not a slice of
-// reasons for the failure are returned in addition to a bool.
-func (cs Constraints) Validate(v *Version) (bool, []error) {
- // loop over the ORs and check the inner ANDs
- var e []error
-
- // Capture the prerelease message only once. When it happens the first time
- // this var is marked
- var prerelesase bool
- for i, o := range cs.constraints {
- joy := true
- for _, c := range o {
- // Before running the check handle the case there the version is
- // a prerelease and the check is not searching for prereleases.
- if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" {
- if !prerelesase {
- em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- e = append(e, em)
- prerelesase = true
- }
- joy = false
-
- } else {
-
- if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil {
- e = append(e, err)
- joy = false
- }
- }
- }
-
- if joy {
- return true, []error{}
- }
- }
-
- return false, e
-}
-
-func (cs Constraints) String() string {
- buf := make([]string, len(cs.constraints))
- var tmp bytes.Buffer
-
- for k, v := range cs.constraints {
- tmp.Reset()
- vlen := len(v)
- for kk, c := range v {
- tmp.WriteString(c.string())
-
- // Space separate the AND conditions
- if vlen > 1 && kk < vlen-1 {
- tmp.WriteString(" ")
- }
- }
- buf[k] = tmp.String()
- }
-
- return strings.Join(buf, " || ")
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (cs *Constraints) UnmarshalText(text []byte) error {
- temp, err := NewConstraint(string(text))
- if err != nil {
- return err
- }
-
- *cs = *temp
-
- return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (cs Constraints) MarshalText() ([]byte, error) {
- return []byte(cs.String()), nil
-}
-
-var constraintOps map[string]cfunc
-var constraintRegex *regexp.Regexp
-var constraintRangeRegex *regexp.Regexp
-
-// Used to find individual constraints within a multi-constraint string
-var findConstraintRegex *regexp.Regexp
-
-// Used to validate an segment of ANDs is valid
-var validConstraintRegex *regexp.Regexp
-
-const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
- `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
- `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
-
-func init() {
- constraintOps = map[string]cfunc{
- "": constraintTildeOrEqual,
- "=": constraintTildeOrEqual,
- "!=": constraintNotEqual,
- ">": constraintGreaterThan,
- "<": constraintLessThan,
- ">=": constraintGreaterThanEqual,
- "=>": constraintGreaterThanEqual,
- "<=": constraintLessThanEqual,
- "=<": constraintLessThanEqual,
- "~": constraintTilde,
- "~>": constraintTilde,
- "^": constraintCaret,
- }
-
- ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
-
- constraintRegex = regexp.MustCompile(fmt.Sprintf(
- `^\s*(%s)\s*(%s)\s*$`,
- ops,
- cvRegex))
-
- constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
- `\s*(%s)\s+-\s+(%s)\s*`,
- cvRegex, cvRegex))
-
- findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
- `(%s)\s*(%s)`,
- ops,
- cvRegex))
-
- // The first time a constraint shows up will look slightly different from
- // future times it shows up due to a leading space or comma in a given
- // string.
- validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
- `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
- ops,
- cvRegex,
- ops,
- cvRegex))
-}
-
-// An individual constraint
-type constraint struct {
- // The version used in the constraint check. For example, if a constraint
- // is '<= 2.0.0' the con a version instance representing 2.0.0.
- con *Version
-
- // The original parsed version (e.g., 4.x from != 4.x)
- orig string
-
- // The original operator for the constraint
- origfunc string
-
- // When an x is used as part of the version (e.g., 1.x)
- minorDirty bool
- dirty bool
- patchDirty bool
-}
-
-// Check if a version meets the constraint
-func (c *constraint) check(v *Version, includePre bool) (bool, error) {
- return constraintOps[c.origfunc](v, c, includePre)
-}
-
-// String prints an individual constraint into a string
-func (c *constraint) string() string {
- return c.origfunc + c.orig
-}
-
-type cfunc func(v *Version, c *constraint, includePre bool) (bool, error)
-
-func parseConstraint(c string) (*constraint, error) {
- if len(c) > 0 {
- m := constraintRegex.FindStringSubmatch(c)
- if m == nil {
- return nil, fmt.Errorf("improper constraint: %s", c)
- }
-
- cs := &constraint{
- orig: m[2],
- origfunc: m[1],
- }
-
- ver := m[2]
- minorDirty := false
- patchDirty := false
- dirty := false
- if isX(m[3]) || m[3] == "" {
- ver = fmt.Sprintf("0.0.0%s", m[6])
- dirty = true
- } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
- minorDirty = true
- dirty = true
- ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
- } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
- dirty = true
- patchDirty = true
- ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
- }
-
- con, err := NewVersion(ver)
- if err != nil {
-
- // The constraintRegex should catch any regex parsing errors. So,
- // we should never get here.
- return nil, errors.New("constraint parser error")
- }
-
- cs.con = con
- cs.minorDirty = minorDirty
- cs.patchDirty = patchDirty
- cs.dirty = dirty
-
- return cs, nil
- }
-
- // The rest is the special case where an empty string was passed in which
- // is equivalent to * or >=0.0.0
- con, err := StrictNewVersion("0.0.0")
- if err != nil {
-
- // The constraintRegex should catch any regex parsing errors. So,
- // we should never get here.
- return nil, errors.New("constraint parser error")
- }
-
- cs := &constraint{
- con: con,
- orig: c,
- origfunc: "",
- minorDirty: false,
- patchDirty: false,
- dirty: true,
- }
- return cs, nil
-}
-
-// Constraint functions
-func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) {
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- if c.dirty {
- if c.con.Major() != v.Major() {
- return true, nil
- }
- if c.con.Minor() != v.Minor() && !c.minorDirty {
- return true, nil
- } else if c.minorDirty {
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- } else if c.con.Patch() != v.Patch() && !c.patchDirty {
- return true, nil
- } else if c.patchDirty {
- // Need to handle prereleases if present
- if v.Prerelease() != "" || c.con.Prerelease() != "" {
- eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- }
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- }
- }
-
- eq := v.Equal(c.con)
- if eq {
- return false, fmt.Errorf("%s is equal to %s", v, c.orig)
- }
-
- return true, nil
-}
-
-func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) {
-
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- var eq bool
-
- if !c.dirty {
- eq = v.Compare(c.con) == 1
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- }
-
- if v.Major() > c.con.Major() {
- return true, nil
- } else if v.Major() < c.con.Major() {
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- } else if c.minorDirty {
- // This is a range case such as >11. When the version is something like
- // 11.1.0 is it not > 11. For that we would need 12 or higher
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- } else if c.patchDirty {
- // This is for ranges such as >11.1. A version of 11.1.1 is not greater
- // which one of 11.2.1 is greater
- eq = v.Minor() > c.con.Minor()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
- }
-
- // If we have gotten here we are not comparing pre-preleases and can use the
- // Compare function to accomplish that.
- eq = v.Compare(c.con) == 1
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
-}
-
-func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) {
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- eq := v.Compare(c.con) < 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
-}
-
-func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
-
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- eq := v.Compare(c.con) >= 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is less than %s", v, c.orig)
-}
-
-func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- var eq bool
-
- if !c.dirty {
- eq = v.Compare(c.con) <= 0
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s is greater than %s", v, c.orig)
- }
-
- if v.Major() > c.con.Major() {
- return false, fmt.Errorf("%s is greater than %s", v, c.orig)
- } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
- return false, fmt.Errorf("%s is greater than %s", v, c.orig)
- }
-
- return true, nil
-}
-
-// ~*, ~>* --> >= 0.0.0 (any)
-// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
-// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
-// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
-// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
-// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
-func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) {
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- if v.LessThan(c.con) {
- return false, fmt.Errorf("%s is less than %s", v, c.orig)
- }
-
- // ~0.0.0 is a special case where all constraints are accepted. It's
- // equivalent to >= 0.0.0.
- if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
- !c.minorDirty && !c.patchDirty {
- return true, nil
- }
-
- if v.Major() != c.con.Major() {
- return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
- }
-
- if v.Minor() != c.con.Minor() && !c.minorDirty {
- return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
- }
-
- return true, nil
-}
-
-// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
-// it's a straight =
-func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) {
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- if c.dirty {
- return constraintTilde(v, c, includePre)
- }
-
- eq := v.Equal(c.con)
- if eq {
- return true, nil
- }
-
- return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
-}
-
-// ^* --> (any)
-// ^1.2.3 --> >=1.2.3 <2.0.0
-// ^1.2 --> >=1.2.0 <2.0.0
-// ^1 --> >=1.0.0 <2.0.0
-// ^0.2.3 --> >=0.2.3 <0.3.0
-// ^0.2 --> >=0.2.0 <0.3.0
-// ^0.0.3 --> >=0.0.3 <0.0.4
-// ^0.0 --> >=0.0.0 <0.1.0
-// ^0 --> >=0.0.0 <1.0.0
-func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) {
- // The existence of prereleases is checked at the group level and passed in.
- // Exit early if the version has a prerelease but those are to be ignored.
- if v.Prerelease() != "" && !includePre {
- return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
- }
-
- // This less than handles prereleases
- if v.LessThan(c.con) {
- return false, fmt.Errorf("%s is less than %s", v, c.orig)
- }
-
- var eq bool
-
- // ^ when the major > 0 is >=x.y.z < x+1
- if c.con.Major() > 0 || c.minorDirty {
-
- // ^ has to be within a major range for > 0. Everything less than was
- // filtered out with the LessThan call above. This filters out those
- // that greater but not within the same major range.
- eq = v.Major() == c.con.Major()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
- }
-
- // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
- if c.con.Major() == 0 && v.Major() > 0 {
- return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
- }
- // If the con Minor is > 0 it is not dirty
- if c.con.Minor() > 0 || c.patchDirty {
- eq = v.Minor() == c.con.Minor()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
- }
- // ^ when the minor is 0 and minor > 0 is =0.0.z
- if c.con.Minor() == 0 && v.Minor() > 0 {
- return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
- }
-
- // At this point the major is 0 and the minor is 0 and not dirty. The patch
- // is not dirty so we need to check if they are equal. If they are not equal
- eq = c.con.Patch() == v.Patch()
- if eq {
- return true, nil
- }
- return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
-}
-
-func isX(x string) bool {
- switch x {
- case "x", "*", "X":
- return true
- default:
- return false
- }
-}
-
-func rewriteRange(i string) string {
- m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
- if m == nil {
- return i
- }
- o := i
- for _, v := range m {
- t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
- o = strings.Replace(o, v[0], t, 1)
- }
-
- return o
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
deleted file mode 100644
index 74f97caa5..000000000
--- a/vendor/github.com/Masterminds/semver/v3/doc.go
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
-Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
-
-Specifically it provides the ability to:
-
- - Parse semantic versions
- - Sort semantic versions
- - Check if a semantic version fits within a set of constraints
- - Optionally work with a `v` prefix
-
-# Parsing Semantic Versions
-
-There are two functions that can parse semantic versions. The `StrictNewVersion`
-function only parses valid version 2 semantic versions as outlined in the
-specification. The `NewVersion` function attempts to coerce a version into a
-semantic version and parse it. For example, if there is a leading v or a version
-listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
-semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
-that can be sorted, compared, and used in constraints.
-
-When parsing a version an optional error can be returned if there is an issue
-parsing the version. For example,
-
- v, err := semver.NewVersion("1.2.3-beta.1+b345")
-
-The version object has methods to get the parts of the version, compare it to
-other versions, convert the version back into a string, and get the original
-string. For more details please see the documentation
-at https://godoc.org/github.com/Masterminds/semver.
-
-# Sorting Semantic Versions
-
-A set of versions can be sorted using the `sort` package from the standard library.
-For example,
-
- raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
- vs := make([]*semver.Version, len(raw))
- for i, r := range raw {
- v, err := semver.NewVersion(r)
- if err != nil {
- t.Errorf("Error parsing version: %s", err)
- }
-
- vs[i] = v
- }
-
- sort.Sort(semver.Collection(vs))
-
-# Checking Version Constraints and Comparing Versions
-
-There are two methods for comparing versions. One uses comparison methods on
-`Version` instances and the other is using Constraints. There are some important
-differences to notes between these two methods of comparison.
-
- 1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include prereleases
- within the comparison. It will provide an answer valid with the comparison
- spec section at https://semver.org/#spec-item-11
- 2. When constraint checking is used for checks or validation it will follow a
- different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering prereleases to be invalid if the
- ranges does not include on. If you want to have it include pre-releases a
- simple solution is to include `-0` in your range.
- 3. Constraint ranges can have some complex rules including the shorthard use of
- ~ and ^. For more details on those see the options below.
-
-There are differences between the two methods or checking versions because the
-comparison methods on `Version` follow the specification while comparison ranges
-are not part of the specification. Different packages and tools have taken it
-upon themselves to come up with range rules. This has resulted in differences.
-For example, npm/js and Cargo/Rust follow similar patterns which PHP has a
-different pattern for ^. The comparison features in this package follow the
-npm/js and Cargo/Rust lead because applications using it have followed similar
-patters with their versions.
-
-Checking a version against version constraints is one of the most featureful
-parts of the package.
-
- c, err := semver.NewConstraint(">= 1.2.3")
- if err != nil {
- // Handle constraint not being parsable.
- }
-
- v, err := semver.NewVersion("1.3")
- if err != nil {
- // Handle version not being parsable.
- }
- // Check if the version meets the constraints. The a variable will be true.
- a := c.Check(v)
-
-# Basic Comparisons
-
-There are two elements to the comparisons. First, a comparison string is a list
-of comma or space separated AND comparisons. These are then separated by || (OR)
-comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
-comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
-greater than or equal to 4.2.3. This can also be written as
-`">= 1.2, < 3.0.0 || >= 4.2.3"`
-
-The basic comparisons are:
-
- - `=`: equal (aliased to no operator)
- - `!=`: not equal
- - `>`: greater than
- - `<`: less than
- - `>=`: greater than or equal to
- - `<=`: less than or equal to
-
-# Hyphen Range Comparisons
-
-There are multiple methods to handle ranges and the first is hyphens ranges.
-These look like:
-
- - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
- - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
-
-# Wildcards In Comparisons
-
-The `x`, `X`, and `*` characters can be used as a wildcard character. This works
-for all comparison operators. When used on the `=` operator it falls
-back to the tilde operation. For example,
-
- - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
- - `>= 1.2.x` is equivalent to `>= 1.2.0`
- - `<= 2.x` is equivalent to `<= 3`
- - `*` is equivalent to `>= 0.0.0`
-
-Tilde Range Comparisons (Patch)
-
-The tilde (`~`) comparison operator is for patch level ranges when a minor
-version is specified and major level changes when the minor number is missing.
-For example,
-
- - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
- - `~1` is equivalent to `>= 1, < 2`
- - `~2.3` is equivalent to `>= 2.3 < 2.4`
- - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
- - `~1.x` is equivalent to `>= 1 < 2`
-
-Caret Range Comparisons (Major)
-
-The caret (`^`) comparison operator is for major level changes once a stable
-(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts
-as the API stability level. This is useful when comparisons of API versions as a
-major change is API breaking. For example,
-
- - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
- - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
- - `^2.3` is equivalent to `>= 2.3, < 3`
- - `^2.x` is equivalent to `>= 2.0.0, < 3`
- - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
- - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
- - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
- - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
- - `^0` is equivalent to `>=0.0.0 <1.0.0`
-
-# Validation
-
-In addition to testing a version against a constraint, a version can be validated
-against a constraint. When validation fails a slice of errors containing why a
-version didn't meet the constraint is returned. For example,
-
- c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
- if err != nil {
- // Handle constraint not being parseable.
- }
-
- v, _ := semver.NewVersion("1.3")
- if err != nil {
- // Handle version not being parseable.
- }
-
- // Validate a version against a constraint.
- a, msgs := c.Validate(v)
- // a is false
- for _, m := range msgs {
- fmt.Println(m)
-
- // Loops over the errors which would read
- // "1.3 is greater than 1.2.3"
- // "1.3 is less than 1.4"
- }
-*/
-package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
deleted file mode 100644
index 7a3ba7388..000000000
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ /dev/null
@@ -1,788 +0,0 @@
-package semver
-
-import (
- "bytes"
- "database/sql/driver"
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-// The compiled version of the regex created at init() is cached here so it
-// only needs to be created once.
-var versionRegex *regexp.Regexp
-var looseVersionRegex *regexp.Regexp
-
-// CoerceNewVersion sets if leading 0's are allowd in the version part. Leading 0's are
-// not allowed in a valid semantic version. When set to true, NewVersion will coerce
-// leading 0's into a valid version.
-var CoerceNewVersion = true
-
-// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
-// function. This is used when CoerceNewVersion is set to false. If set to false
-// ErrInvalidSemVer is returned for an invalid version. This does not apply to
-// StrictNewVersion. Setting this function to false returns errors more quickly.
-var DetailedNewVersionErrors = true
-
-var (
- // ErrInvalidSemVer is returned a version is found to be invalid when
- // being parsed.
- ErrInvalidSemVer = errors.New("invalid semantic version")
-
- // ErrEmptyString is returned when an empty string is passed in for parsing.
- ErrEmptyString = errors.New("version string empty")
-
- // ErrInvalidCharacters is returned when invalid characters are found as
- // part of a version
- ErrInvalidCharacters = errors.New("invalid characters in version")
-
- // ErrSegmentStartsZero is returned when a version segment starts with 0.
- // This is invalid in SemVer.
- ErrSegmentStartsZero = errors.New("version segment starts with 0")
-
- // ErrInvalidMetadata is returned when the metadata is an invalid format
- ErrInvalidMetadata = errors.New("invalid metadata string")
-
- // ErrInvalidPrerelease is returned when the pre-release is an invalid format
- ErrInvalidPrerelease = errors.New("invalid prerelease string")
-)
-
-// semVerRegex is the regular expression used to parse a semantic version.
-// This is not the official regex from the semver spec. It has been modified to allow for loose handling
-// where versions like 2.1 are detected.
-const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
- `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
- `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
-
-// looseSemVerRegex is a regular expression that lets invalid semver expressions through
-// with enough detail that certain errors can be checked for.
-const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
- `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
- `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
-
-// Version represents a single semantic version.
-type Version struct {
- major, minor, patch uint64
- pre string
- metadata string
- original string
-}
-
-func init() {
- versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
- looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$")
-}
-
-const (
- num string = "0123456789"
- allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
-)
-
-// StrictNewVersion parses a given version and returns an instance of Version or
-// an error if unable to parse the version. Only parses valid semantic versions.
-// Performs checking that can find errors within the version.
-// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x
-// releases of semver did, use the NewVersion() function.
-func StrictNewVersion(v string) (*Version, error) {
- // Parsing here does not use RegEx in order to increase performance and reduce
- // allocations.
-
- if len(v) == 0 {
- return nil, ErrEmptyString
- }
-
- // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
- parts := strings.SplitN(v, ".", 3)
- if len(parts) != 3 {
- return nil, ErrInvalidSemVer
- }
-
- sv := &Version{
- original: v,
- }
-
- // Extract build metadata
- if strings.Contains(parts[2], "+") {
- extra := strings.SplitN(parts[2], "+", 2)
- sv.metadata = extra[1]
- parts[2] = extra[0]
- if err := validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
- // Extract build prerelease
- if strings.Contains(parts[2], "-") {
- extra := strings.SplitN(parts[2], "-", 2)
- sv.pre = extra[1]
- parts[2] = extra[0]
- if err := validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- // Validate the number segments are valid. This includes only having positive
- // numbers and no leading 0's.
- for _, p := range parts {
- if !containsOnly(p, num) {
- return nil, ErrInvalidCharacters
- }
-
- if len(p) > 1 && p[0] == '0' {
- return nil, ErrSegmentStartsZero
- }
- }
-
- // Extract major, minor, and patch
- var err error
- sv.major, err = strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return nil, err
- }
-
- sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
- if err != nil {
- return nil, err
- }
-
- sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
- if err != nil {
- return nil, err
- }
-
- return sv, nil
-}
-
-// NewVersion parses a given version and returns an instance of Version or
-// an error if unable to parse the version. If the version is SemVer-ish it
-// attempts to convert it to SemVer. If you want to validate it was a strict
-// semantic version at parse time see StrictNewVersion().
-func NewVersion(v string) (*Version, error) {
- if CoerceNewVersion {
- return coerceNewVersion(v)
- }
- m := versionRegex.FindStringSubmatch(v)
- if m == nil {
-
- // Disabling detailed errors is first so that it is in the fast path.
- if !DetailedNewVersionErrors {
- return nil, ErrInvalidSemVer
- }
-
- // Check for specific errors with the semver string and return a more detailed
- // error.
- m = looseVersionRegex.FindStringSubmatch(v)
- if m == nil {
- return nil, ErrInvalidSemVer
- }
- err := validateVersion(m)
- if err != nil {
- return nil, err
- }
- return nil, ErrInvalidSemVer
- }
-
- sv := &Version{
- metadata: m[5],
- pre: m[4],
- original: v,
- }
-
- var err error
- sv.major, err = strconv.ParseUint(m[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("error parsing version segment: %w", err)
- }
-
- if m[2] != "" {
- sv.minor, err = strconv.ParseUint(m[2], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("error parsing version segment: %w", err)
- }
- } else {
- sv.minor = 0
- }
-
- if m[3] != "" {
- sv.patch, err = strconv.ParseUint(m[3], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("error parsing version segment: %w", err)
- }
- } else {
- sv.patch = 0
- }
-
- // Perform some basic due diligence on the extra parts to ensure they are
- // valid.
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
- return sv, nil
-}
-
-func coerceNewVersion(v string) (*Version, error) {
- m := looseVersionRegex.FindStringSubmatch(v)
- if m == nil {
- return nil, ErrInvalidSemVer
- }
-
- sv := &Version{
- metadata: m[8],
- pre: m[5],
- original: v,
- }
-
- var err error
- sv.major, err = strconv.ParseUint(m[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("error parsing version segment: %w", err)
- }
-
- if m[2] != "" {
- sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
- if err != nil {
- return nil, fmt.Errorf("error parsing version segment: %w", err)
- }
- } else {
- sv.minor = 0
- }
-
- if m[3] != "" {
- sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
- if err != nil {
- return nil, fmt.Errorf("error parsing version segment: %w", err)
- }
- } else {
- sv.patch = 0
- }
-
- // Perform some basic due diligence on the extra parts to ensure they are
- // valid.
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
- return sv, nil
-}
-
-// New creates a new instance of Version with each of the parts passed in as
-// arguments instead of parsing a version string.
-func New(major, minor, patch uint64, pre, metadata string) *Version {
- v := Version{
- major: major,
- minor: minor,
- patch: patch,
- pre: pre,
- metadata: metadata,
- original: "",
- }
-
- v.original = v.String()
-
- return &v
-}
-
-// MustParse parses a given version and panics on error.
-func MustParse(v string) *Version {
- sv, err := NewVersion(v)
- if err != nil {
- panic(err)
- }
- return sv
-}
-
-// String converts a Version object to a string.
-// Note, if the original version contained a leading v this version will not.
-// See the Original() method to retrieve the original value. Semantic Versions
-// don't contain a leading v per the spec. Instead it's optional on
-// implementation.
-func (v Version) String() string {
- var buf bytes.Buffer
-
- fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
- if v.pre != "" {
- fmt.Fprintf(&buf, "-%s", v.pre)
- }
- if v.metadata != "" {
- fmt.Fprintf(&buf, "+%s", v.metadata)
- }
-
- return buf.String()
-}
-
-// Original returns the original value passed in to be parsed.
-func (v *Version) Original() string {
- return v.original
-}
-
-// Major returns the major version.
-func (v Version) Major() uint64 {
- return v.major
-}
-
-// Minor returns the minor version.
-func (v Version) Minor() uint64 {
- return v.minor
-}
-
-// Patch returns the patch version.
-func (v Version) Patch() uint64 {
- return v.patch
-}
-
-// Prerelease returns the pre-release version.
-func (v Version) Prerelease() string {
- return v.pre
-}
-
-// Metadata returns the metadata on the version.
-func (v Version) Metadata() string {
- return v.metadata
-}
-
-// originalVPrefix returns the original 'v' prefix if any.
-func (v Version) originalVPrefix() string {
- // Note, only lowercase v is supported as a prefix by the parser.
- if v.original != "" && v.original[:1] == "v" {
- return v.original[:1]
- }
- return ""
-}
-
-// IncPatch produces the next patch version.
-// If the current version does not have prerelease/metadata information,
-// it unsets metadata and prerelease values, increments patch number.
-// If the current version has any of prerelease or metadata information,
-// it unsets both values and keeps current patch value
-func (v Version) IncPatch() Version {
- vNext := v
- // according to http://semver.org/#spec-item-9
- // Pre-release versions have a lower precedence than the associated normal version.
- // according to http://semver.org/#spec-item-10
- // Build metadata SHOULD be ignored when determining version precedence.
- if v.pre != "" {
- vNext.metadata = ""
- vNext.pre = ""
- } else {
- vNext.metadata = ""
- vNext.pre = ""
- vNext.patch = v.patch + 1
- }
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext
-}
-
-// IncMinor produces the next minor version.
-// Sets patch to 0.
-// Increments minor number.
-// Unsets metadata.
-// Unsets prerelease status.
-func (v Version) IncMinor() Version {
- vNext := v
- vNext.metadata = ""
- vNext.pre = ""
- vNext.patch = 0
- vNext.minor = v.minor + 1
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext
-}
-
-// IncMajor produces the next major version.
-// Sets patch to 0.
-// Sets minor to 0.
-// Increments major number.
-// Unsets metadata.
-// Unsets prerelease status.
-func (v Version) IncMajor() Version {
- vNext := v
- vNext.metadata = ""
- vNext.pre = ""
- vNext.patch = 0
- vNext.minor = 0
- vNext.major = v.major + 1
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext
-}
-
-// SetPrerelease defines the prerelease value.
-// Value must not include the required 'hyphen' prefix.
-func (v Version) SetPrerelease(prerelease string) (Version, error) {
- vNext := v
- if len(prerelease) > 0 {
- if err := validatePrerelease(prerelease); err != nil {
- return vNext, err
- }
- }
- vNext.pre = prerelease
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext, nil
-}
-
-// SetMetadata defines metadata value.
-// Value must not include the required 'plus' prefix.
-func (v Version) SetMetadata(metadata string) (Version, error) {
- vNext := v
- if len(metadata) > 0 {
- if err := validateMetadata(metadata); err != nil {
- return vNext, err
- }
- }
- vNext.metadata = metadata
- vNext.original = v.originalVPrefix() + "" + vNext.String()
- return vNext, nil
-}
-
-// LessThan tests if one version is less than another one.
-func (v *Version) LessThan(o *Version) bool {
- return v.Compare(o) < 0
-}
-
-// LessThanEqual tests if one version is less or equal than another one.
-func (v *Version) LessThanEqual(o *Version) bool {
- return v.Compare(o) <= 0
-}
-
-// GreaterThan tests if one version is greater than another one.
-func (v *Version) GreaterThan(o *Version) bool {
- return v.Compare(o) > 0
-}
-
-// GreaterThanEqual tests if one version is greater or equal than another one.
-func (v *Version) GreaterThanEqual(o *Version) bool {
- return v.Compare(o) >= 0
-}
-
-// Equal tests if two versions are equal to each other.
-// Note, versions can be equal with different metadata since metadata
-// is not considered part of the comparable version.
-func (v *Version) Equal(o *Version) bool {
- if v == o {
- return true
- }
- if v == nil || o == nil {
- return false
- }
- return v.Compare(o) == 0
-}
-
-// Compare compares this version to another one. It returns -1, 0, or 1 if
-// the version smaller, equal, or larger than the other version.
-//
-// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
-// lower than the version without a prerelease. Compare always takes into account
-// prereleases. If you want to work with ranges using typical range syntaxes that
-// skip prereleases if the range is not looking for them use constraints.
-func (v *Version) Compare(o *Version) int {
- // Compare the major, minor, and patch version for differences. If a
- // difference is found return the comparison.
- if d := compareSegment(v.Major(), o.Major()); d != 0 {
- return d
- }
- if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
- return d
- }
- if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
- return d
- }
-
- // At this point the major, minor, and patch versions are the same.
- ps := v.pre
- po := o.Prerelease()
-
- if ps == "" && po == "" {
- return 0
- }
- if ps == "" {
- return 1
- }
- if po == "" {
- return -1
- }
-
- return comparePrerelease(ps, po)
-}
-
-// UnmarshalJSON implements JSON.Unmarshaler interface.
-func (v *Version) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- temp, err := NewVersion(s)
- if err != nil {
- return err
- }
- v.major = temp.major
- v.minor = temp.minor
- v.patch = temp.patch
- v.pre = temp.pre
- v.metadata = temp.metadata
- v.original = temp.original
- return nil
-}
-
-// MarshalJSON implements JSON.Marshaler interface.
-func (v Version) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (v *Version) UnmarshalText(text []byte) error {
- temp, err := NewVersion(string(text))
- if err != nil {
- return err
- }
-
- *v = *temp
-
- return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (v Version) MarshalText() ([]byte, error) {
- return []byte(v.String()), nil
-}
-
-// Scan implements the SQL.Scanner interface.
-func (v *Version) Scan(value interface{}) error {
- var s string
- s, _ = value.(string)
- temp, err := NewVersion(s)
- if err != nil {
- return err
- }
- v.major = temp.major
- v.minor = temp.minor
- v.patch = temp.patch
- v.pre = temp.pre
- v.metadata = temp.metadata
- v.original = temp.original
- return nil
-}
-
-// Value implements the Driver.Valuer interface.
-func (v Version) Value() (driver.Value, error) {
- return v.String(), nil
-}
-
-func compareSegment(v, o uint64) int {
- if v < o {
- return -1
- }
- if v > o {
- return 1
- }
-
- return 0
-}
-
-func comparePrerelease(v, o string) int {
- // split the prelease versions by their part. The separator, per the spec,
- // is a .
- sparts := strings.Split(v, ".")
- oparts := strings.Split(o, ".")
-
- // Find the longer length of the parts to know how many loop iterations to
- // go through.
- slen := len(sparts)
- olen := len(oparts)
-
- l := slen
- if olen > slen {
- l = olen
- }
-
- // Iterate over each part of the prereleases to compare the differences.
- for i := 0; i < l; i++ {
- // Since the lentgh of the parts can be different we need to create
- // a placeholder. This is to avoid out of bounds issues.
- stemp := ""
- if i < slen {
- stemp = sparts[i]
- }
-
- otemp := ""
- if i < olen {
- otemp = oparts[i]
- }
-
- d := comparePrePart(stemp, otemp)
- if d != 0 {
- return d
- }
- }
-
- // Reaching here means two versions are of equal value but have different
- // metadata (the part following a +). They are not identical in string form
- // but the version comparison finds them to be equal.
- return 0
-}
-
-func comparePrePart(s, o string) int {
- // Fastpath if they are equal
- if s == o {
- return 0
- }
-
- // When s or o are empty we can use the other in an attempt to determine
- // the response.
- if s == "" {
- if o != "" {
- return -1
- }
- return 1
- }
-
- if o == "" {
- if s != "" {
- return 1
- }
- return -1
- }
-
- // When comparing strings "99" is greater than "103". To handle
- // cases like this we need to detect numbers and compare them. According
- // to the semver spec, numbers are always positive. If there is a - at the
- // start like -99 this is to be evaluated as an alphanum. numbers always
- // have precedence over alphanum. Parsing as Uints because negative numbers
- // are ignored.
-
- oi, n1 := strconv.ParseUint(o, 10, 64)
- si, n2 := strconv.ParseUint(s, 10, 64)
-
- // The case where both are strings compare the strings
- if n1 != nil && n2 != nil {
- if s > o {
- return 1
- }
- return -1
- } else if n1 != nil {
- // o is a string and s is a number
- return -1
- } else if n2 != nil {
- // s is a string and o is a number
- return 1
- }
- // Both are numbers
- if si > oi {
- return 1
- }
- return -1
-}
-
-// Like strings.ContainsAny but does an only instead of any.
-func containsOnly(s string, comp string) bool {
- return strings.IndexFunc(s, func(r rune) bool {
- return !strings.ContainsRune(comp, r)
- }) == -1
-}
-
-// From the spec, "Identifiers MUST comprise only
-// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
-// Numeric identifiers MUST NOT include leading zeroes.". These segments can
-// be dot separated.
-func validatePrerelease(p string) error {
- eparts := strings.Split(p, ".")
- for _, p := range eparts {
- if p == "" {
- return ErrInvalidPrerelease
- } else if containsOnly(p, num) {
- if len(p) > 1 && p[0] == '0' {
- return ErrSegmentStartsZero
- }
- } else if !containsOnly(p, allowed) {
- return ErrInvalidPrerelease
- }
- }
-
- return nil
-}
-
-// From the spec, "Build metadata MAY be denoted by
-// appending a plus sign and a series of dot separated identifiers immediately
-// following the patch or pre-release version. Identifiers MUST comprise only
-// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
-func validateMetadata(m string) error {
- eparts := strings.Split(m, ".")
- for _, p := range eparts {
- if p == "" {
- return ErrInvalidMetadata
- } else if !containsOnly(p, allowed) {
- return ErrInvalidMetadata
- }
- }
- return nil
-}
-
-// validateVersion checks for common validation issues but may not catch all errors
-func validateVersion(m []string) error {
- var err error
- var v string
- if m[1] != "" {
- if len(m[1]) > 1 && m[1][0] == '0' {
- return ErrSegmentStartsZero
- }
- _, err = strconv.ParseUint(m[1], 10, 64)
- if err != nil {
- return fmt.Errorf("error parsing version segment: %w", err)
- }
- }
-
- if m[2] != "" {
- v = strings.TrimPrefix(m[2], ".")
- if len(v) > 1 && v[0] == '0' {
- return ErrSegmentStartsZero
- }
- _, err = strconv.ParseUint(v, 10, 64)
- if err != nil {
- return fmt.Errorf("error parsing version segment: %w", err)
- }
- }
-
- if m[3] != "" {
- v = strings.TrimPrefix(m[3], ".")
- if len(v) > 1 && v[0] == '0' {
- return ErrSegmentStartsZero
- }
- _, err = strconv.ParseUint(v, 10, 64)
- if err != nil {
- return fmt.Errorf("error parsing version segment: %w", err)
- }
- }
-
- if m[5] != "" {
- if err = validatePrerelease(m[5]); err != nil {
- return err
- }
- }
-
- if m[8] != "" {
- if err = validateMetadata(m[8]); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/config.go b/vendor/github.com/coredns/coredns/core/dnsserver/config.go
index 168120795..fcf9c95ce 100644
--- a/vendor/github.com/coredns/coredns/core/dnsserver/config.go
+++ b/vendor/github.com/coredns/coredns/core/dnsserver/config.go
@@ -66,6 +66,22 @@ type Config struct {
// This is nil if not specified, allowing for a default to be used.
MaxQUICWorkerPoolSize *int
+ // MaxGRPCStreams defines the maximum number of concurrent streams per gRPC connection.
+ // This is nil if not specified, allowing for a default to be used.
+ MaxGRPCStreams *int
+
+ // MaxGRPCConnections defines the maximum number of concurrent gRPC connections.
+ // This is nil if not specified, allowing for a default to be used.
+ MaxGRPCConnections *int
+
+ // MaxHTTPSConnections defines the maximum number of concurrent HTTPS connections.
+ // This is nil if not specified, allowing for a default to be used.
+ MaxHTTPSConnections *int
+
+ // MaxHTTPS3Streams defines the maximum number of concurrent QUIC streams for HTTPS3.
+ // This is nil if not specified, allowing for a default to be used.
+ MaxHTTPS3Streams *int
+
// Timeouts for TCP, TLS and HTTPS servers.
ReadTimeout time.Duration
WriteTimeout time.Duration
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/quic.go b/vendor/github.com/coredns/coredns/core/dnsserver/quic.go
index 24684a45b..04201f220 100644
--- a/vendor/github.com/coredns/coredns/core/dnsserver/quic.go
+++ b/vendor/github.com/coredns/coredns/core/dnsserver/quic.go
@@ -53,7 +53,7 @@ func (w *DoQWriter) Close() error {
// AddPrefix adds a 2-byte prefix with the DNS message length.
func AddPrefix(b []byte) (m []byte) {
m = make([]byte, 2+len(b))
- binary.BigEndian.PutUint16(m, uint16(len(b)))
+ binary.BigEndian.PutUint16(m, uint16(len(b))) // #nosec G115 -- DNS message length fits in uint16
copy(m[2:], b)
return m
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server_grpc.go b/vendor/github.com/coredns/coredns/core/dnsserver/server_grpc.go
index a834502c8..4bfa59988 100644
--- a/vendor/github.com/coredns/coredns/core/dnsserver/server_grpc.go
+++ b/vendor/github.com/coredns/coredns/core/dnsserver/server_grpc.go
@@ -15,17 +15,35 @@ import (
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/miekg/dns"
"github.com/opentracing/opentracing-go"
+ "golang.org/x/net/netutil"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
)
+const (
+ // maxDNSMessageBytes is the maximum size of a DNS message on the wire.
+ maxDNSMessageBytes = dns.MaxMsgSize
+
+ // maxProtobufPayloadBytes accounts for protobuf overhead.
+ // Field tag=1 (1 byte) + length varint for 65535 (3 bytes) = 4 bytes total
+ maxProtobufPayloadBytes = maxDNSMessageBytes + 4
+
+ // DefaultGRPCMaxStreams is the default maximum number of concurrent streams per connection.
+ DefaultGRPCMaxStreams = 256
+
+ // DefaultGRPCMaxConnections is the default maximum number of concurrent connections.
+ DefaultGRPCMaxConnections = 200
+)
+
// ServergRPC represents an instance of a DNS-over-gRPC server.
type ServergRPC struct {
*Server
*pb.UnimplementedDnsServiceServer
- grpcServer *grpc.Server
- listenAddr net.Addr
- tlsConfig *tls.Config
+ grpcServer *grpc.Server
+ listenAddr net.Addr
+ tlsConfig *tls.Config
+ maxStreams int
+ maxConnections int
}
// NewServergRPC returns a new CoreDNS GRPC server and compiles all plugin in to it.
@@ -49,7 +67,22 @@ func NewServergRPC(addr string, group []*Config) (*ServergRPC, error) {
tlsConfig.NextProtos = []string{"h2"}
}
- return &ServergRPC{Server: s, tlsConfig: tlsConfig}, nil
+ maxStreams := DefaultGRPCMaxStreams
+ if len(group) > 0 && group[0] != nil && group[0].MaxGRPCStreams != nil {
+ maxStreams = *group[0].MaxGRPCStreams
+ }
+
+ maxConnections := DefaultGRPCMaxConnections
+ if len(group) > 0 && group[0] != nil && group[0].MaxGRPCConnections != nil {
+ maxConnections = *group[0].MaxGRPCConnections
+ }
+
+ return &ServergRPC{
+ Server: s,
+ tlsConfig: tlsConfig,
+ maxStreams: maxStreams,
+ maxConnections: maxConnections,
+ }, nil
}
// Compile-time check to ensure ServergRPC implements the caddy.GracefulServer interface
@@ -61,21 +94,36 @@ func (s *ServergRPC) Serve(l net.Listener) error {
s.listenAddr = l.Addr()
s.m.Unlock()
+ serverOpts := []grpc.ServerOption{
+ grpc.MaxRecvMsgSize(maxProtobufPayloadBytes),
+ grpc.MaxSendMsgSize(maxProtobufPayloadBytes),
+ }
+
+ // Only set MaxConcurrentStreams when a positive limit is configured; 0 leaves it unbounded
+ if s.maxStreams > 0 {
+ serverOpts = append(serverOpts, grpc.MaxConcurrentStreams(uint32(s.maxStreams))) // #nosec G115 -- maxStreams is bounded
+ }
+
if s.Tracer() != nil {
onlyIfParent := func(parentSpanCtx opentracing.SpanContext, method string, req, resp any) bool {
return parentSpanCtx != nil
}
- intercept := otgrpc.OpenTracingServerInterceptor(s.Tracer(), otgrpc.IncludingSpans(onlyIfParent))
- s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(intercept))
- } else {
- s.grpcServer = grpc.NewServer()
+ serverOpts = append(serverOpts, grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(s.Tracer(), otgrpc.IncludingSpans(onlyIfParent))))
}
+ s.grpcServer = grpc.NewServer(serverOpts...)
+
pb.RegisterDnsServiceServer(s.grpcServer, s)
if s.tlsConfig != nil {
l = tls.NewListener(l, s.tlsConfig)
}
+
+ // Wrap listener to limit concurrent connections
+ if s.maxConnections > 0 {
+ l = netutil.LimitListener(l, s.maxConnections)
+ }
+
return s.grpcServer.Serve(l)
}
@@ -122,6 +170,9 @@ func (s *ServergRPC) Stop() (err error) {
// any normal server. We use a custom responseWriter to pick up the bytes we need to write
// back to the client as a protobuf.
func (s *ServergRPC) Query(ctx context.Context, in *pb.DnsPacket) (*pb.DnsPacket, error) {
+ if len(in.GetMsg()) > dns.MaxMsgSize {
+ return nil, fmt.Errorf("dns message exceeds size limit: %d", len(in.GetMsg()))
+ }
msg := new(dns.Msg)
err := msg.Unpack(in.GetMsg())
if err != nil {
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go b/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go
index cf84e8c35..0d522a051 100644
--- a/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go
+++ b/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go
@@ -18,15 +18,23 @@ import (
"github.com/coredns/coredns/plugin/pkg/response"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/coredns/coredns/plugin/pkg/transport"
+
+ "golang.org/x/net/netutil"
+)
+
+const (
+ // DefaultHTTPSMaxConnections is the default maximum number of concurrent connections.
+ DefaultHTTPSMaxConnections = 200
)
// ServerHTTPS represents an instance of a DNS-over-HTTPS server.
type ServerHTTPS struct {
*Server
- httpsServer *http.Server
- listenAddr net.Addr
- tlsConfig *tls.Config
- validRequest func(*http.Request) bool
+ httpsServer *http.Server
+ listenAddr net.Addr
+ tlsConfig *tls.Config
+ validRequest func(*http.Request) bool
+ maxConnections int
}
// loggerAdapter is a simple adapter around CoreDNS logger made to implement io.Writer in order to log errors from HTTP server
@@ -81,8 +89,17 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
IdleTimeout: s.IdleTimeout,
ErrorLog: stdlog.New(&loggerAdapter{}, "", 0),
}
+ maxConnections := DefaultHTTPSMaxConnections
+ if len(group) > 0 && group[0] != nil && group[0].MaxHTTPSConnections != nil {
+ maxConnections = *group[0].MaxHTTPSConnections
+ }
+
sh := &ServerHTTPS{
- Server: s, tlsConfig: tlsConfig, httpsServer: srv, validRequest: validator,
+ Server: s,
+ tlsConfig: tlsConfig,
+ httpsServer: srv,
+ validRequest: validator,
+ maxConnections: maxConnections,
}
sh.httpsServer.Handler = sh
@@ -98,9 +115,15 @@ func (s *ServerHTTPS) Serve(l net.Listener) error {
s.listenAddr = l.Addr()
s.m.Unlock()
+ // Wrap listener to limit concurrent connections (before TLS)
+ if s.maxConnections > 0 {
+ l = netutil.LimitListener(l, s.maxConnections)
+ }
+
if s.tlsConfig != nil {
l = tls.NewListener(l, s.tlsConfig)
}
+
return s.httpsServer.Serve(l)
}
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server_https3.go b/vendor/github.com/coredns/coredns/core/dnsserver/server_https3.go
index d6d1d85b8..ea36abbda 100644
--- a/vendor/github.com/coredns/coredns/core/dnsserver/server_https3.go
+++ b/vendor/github.com/coredns/coredns/core/dnsserver/server_https3.go
@@ -21,6 +21,11 @@ import (
"github.com/quic-go/quic-go/http3"
)
+const (
+ // DefaultHTTPS3MaxStreams is the default maximum number of concurrent QUIC streams per connection.
+ DefaultHTTPS3MaxStreams = 256
+)
+
// ServerHTTPS3 represents a DNS-over-HTTP/3 server.
type ServerHTTPS3 struct {
*Server
@@ -29,6 +34,7 @@ type ServerHTTPS3 struct {
tlsConfig *tls.Config
quicConfig *quic.Config
validRequest func(*http.Request) bool
+ maxStreams int
}
// NewServerHTTPS3 builds the HTTP/3 (DoH3) server.
@@ -63,11 +69,20 @@ func NewServerHTTPS3(addr string, group []*Config) (*ServerHTTPS3, error) {
validator = func(r *http.Request) bool { return r.URL.Path == doh.Path }
}
- // QUIC transport config
+ maxStreams := DefaultHTTPS3MaxStreams
+ if len(group) > 0 && group[0] != nil && group[0].MaxHTTPS3Streams != nil {
+ maxStreams = *group[0].MaxHTTPS3Streams
+ }
+
+ // QUIC transport config with stream limits (0 means use QUIC default)
qconf := &quic.Config{
MaxIdleTimeout: s.IdleTimeout,
Allow0RTT: true,
}
+ if maxStreams > 0 {
+ qconf.MaxIncomingStreams = int64(maxStreams)
+ qconf.MaxIncomingUniStreams = int64(maxStreams)
+ }
h3srv := &http3.Server{
Handler: nil, // set after constructing ServerHTTPS3
@@ -83,6 +98,7 @@ func NewServerHTTPS3(addr string, group []*Config) (*ServerHTTPS3, error) {
httpsServer: h3srv,
quicConfig: qconf,
validRequest: validator,
+ maxStreams: maxStreams,
}
h3srv.Handler = sh
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server_quic.go b/vendor/github.com/coredns/coredns/core/dnsserver/server_quic.go
index b7d7fd7ff..d05db8536 100644
--- a/vendor/github.com/coredns/coredns/core/dnsserver/server_quic.go
+++ b/vendor/github.com/coredns/coredns/core/dnsserver/server_quic.go
@@ -156,12 +156,29 @@ func (s *ServerQUIC) serveQUICConnection(conn *quic.Conn) {
return
}
- // Use a bounded worker pool
- s.streamProcessPool <- struct{}{} // Acquire a worker slot, may block
- go func(st *quic.Stream, cn *quic.Conn) {
- defer func() { <-s.streamProcessPool }() // Release worker slot
- s.serveQUICStream(st, cn)
- }(stream, conn)
+ // Use a bounded worker pool with context cancellation
+ select {
+ case s.streamProcessPool <- struct{}{}:
+ // Got worker slot immediately
+ go func(st *quic.Stream, cn *quic.Conn) {
+ defer func() { <-s.streamProcessPool }() // Release worker slot
+ s.serveQUICStream(st, cn)
+ }(stream, conn)
+ default:
+ // Worker pool full: wait for a slot in a goroutine so stream acceptance is not blocked; give up if the connection closes first
+ go func(st *quic.Stream, cn *quic.Conn) {
+ select {
+ case s.streamProcessPool <- struct{}{}:
+ // Got worker slot after waiting
+ defer func() { <-s.streamProcessPool }() // Release worker slot
+ s.serveQUICStream(st, cn)
+ case <-conn.Context().Done():
+ // Connection context was cancelled while waiting
+ st.Close()
+ return
+ }
+ }(stream, conn)
+ }
}
}
@@ -346,7 +363,8 @@ func readDOQMessage(r io.Reader) ([]byte, error) {
// A client or server receives a STREAM FIN before receiving all the bytes
// for a message indicated in the 2-octet length field.
// See https://www.rfc-editor.org/rfc/rfc9250#section-4.3.3-2.2
- if size != uint16(len(buf)) {
+ //nolint:gosec
+ if size != uint16(len(buf)) { // #nosec G115 -- len(buf) <= size, a uint16, so the conversion cannot truncate
return nil, fmt.Errorf("message size does not match 2-byte prefix")
}
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go b/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go
index a237cbf34..c356740c1 100644
--- a/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go
+++ b/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go
@@ -16,6 +16,9 @@ var Directives = []string{
"cancel",
"tls",
"quic",
+ "grpc_server",
+ "https",
+ "https3",
"timeouts",
"multisocket",
"reload",
diff --git a/vendor/github.com/coredns/coredns/coremain/version.go b/vendor/github.com/coredns/coredns/coremain/version.go
index c61fd0216..872664d92 100644
--- a/vendor/github.com/coredns/coredns/coremain/version.go
+++ b/vendor/github.com/coredns/coredns/coremain/version.go
@@ -2,7 +2,7 @@ package coremain
// Various CoreDNS constants.
const (
- CoreVersion = "1.13.2"
+ CoreVersion = "1.14.1"
CoreName = "CoreDNS"
serverType = "dns"
)
diff --git a/vendor/github.com/coredns/coredns/plugin/bufsize/bufsize.go b/vendor/github.com/coredns/coredns/plugin/bufsize/bufsize.go
index 00556c2ba..23e65c869 100644
--- a/vendor/github.com/coredns/coredns/plugin/bufsize/bufsize.go
+++ b/vendor/github.com/coredns/coredns/plugin/bufsize/bufsize.go
@@ -18,7 +18,7 @@ type Bufsize struct {
// ServeDNS implements the plugin.Handler interface.
func (buf Bufsize) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
if option := r.IsEdns0(); option != nil && int(option.UDPSize()) > buf.Size {
- option.SetUDPSize(uint16(buf.Size))
+ option.SetUDPSize(uint16(buf.Size)) // #nosec G115 -- buffer size fits in uint16
}
return plugin.NextOrFailure(buf.Name(), buf.Next, ctx, w, r)
}
diff --git a/vendor/github.com/coredns/coredns/plugin/cache/handler.go b/vendor/github.com/coredns/coredns/plugin/cache/handler.go
index c45801ad6..b6815ee0e 100644
--- a/vendor/github.com/coredns/coredns/plugin/cache/handler.go
+++ b/vendor/github.com/coredns/coredns/plugin/cache/handler.go
@@ -34,7 +34,7 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
// in which upstream doesn't support DNSSEC, the two cache items will effectively be the same. Regardless, any
// DNSSEC RRs in the response are written to cache with the response.
- i := c.getIgnoreTTL(now, state, server)
+ i := c.getIfNotStale(now, state, server)
if i == nil {
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, ad: ad, cd: cd,
nexcept: c.nexcept, pexcept: c.pexcept, wildcardFunc: wildcardFunc(ctx)}
@@ -121,8 +121,8 @@ func (c *Cache) shouldPrefetch(i *item, now time.Time) bool {
// Name implements the Handler interface.
func (c *Cache) Name() string { return "cache" }
-// getIgnoreTTL unconditionally returns an item if it exists in the cache.
-func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item {
+// getIfNotStale returns an item if it exists in the cache and has not expired.
+func (c *Cache) getIfNotStale(now time.Time, state request.Request, server string) *item {
k := hash(state.Name(), state.QType(), state.Do(), state.Req.CheckingDisabled)
cacheRequests.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
@@ -146,6 +146,7 @@ func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string
return nil
}
+// exists returns the cached item for the request if present, ignoring TTL expiry.
func (c *Cache) exists(state request.Request) *item {
k := hash(state.Name(), state.QType(), state.Do(), state.Req.CheckingDisabled)
if i, ok := c.ncache.Get(k); ok {
diff --git a/vendor/github.com/coredns/coredns/plugin/cache/item.go b/vendor/github.com/coredns/coredns/plugin/cache/item.go
index 3259d4a72..c41574c25 100644
--- a/vendor/github.com/coredns/coredns/plugin/cache/item.go
+++ b/vendor/github.com/coredns/coredns/plugin/cache/item.go
@@ -82,7 +82,7 @@ func (i *item) toMsg(m *dns.Msg, now time.Time, do bool, ad bool) *dns.Msg {
m1.RecursionAvailable = i.RecursionAvailable
m1.Rcode = i.Rcode
- ttl := uint32(i.ttl(now))
+ ttl := uint32(i.ttl(now)) // #nosec G115 -- ttl is bounded by DNS TTL limits
m1.Answer = filterRRSlice(i.Answer, ttl, true)
m1.Ns = filterRRSlice(i.Ns, ttl, true)
m1.Extra = filterRRSlice(i.Extra, ttl, true)
diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/io.go b/vendor/github.com/coredns/coredns/plugin/dnstap/io.go
index 1987b4d43..751631c76 100644
--- a/vendor/github.com/coredns/coredns/plugin/dnstap/io.go
+++ b/vendor/github.com/coredns/coredns/plugin/dnstap/io.go
@@ -70,6 +70,7 @@ func (d *dio) dial() error {
if d.proto == "tls" {
config := &tls.Config{
+ // #nosec G402 -- optional, user-configurable escape hatch for environments that cannot validate certs.
InsecureSkipVerify: d.skipVerify,
}
dialer := &net.Dialer{
diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/msg/msg.go b/vendor/github.com/coredns/coredns/plugin/dnstap/msg/msg.go
index f9d84c45a..9cbc09252 100644
--- a/vendor/github.com/coredns/coredns/plugin/dnstap/msg/msg.go
+++ b/vendor/github.com/coredns/coredns/plugin/dnstap/msg/msg.go
@@ -23,7 +23,7 @@ func SetQueryAddress(t *tap.Message, addr net.Addr) error {
t.SocketProtocol = &protoTCP
t.QueryAddress = a.IP
- p := uint32(a.Port)
+ p := uint32(a.Port) // #nosec G115 -- Port is inherently bounded (0-65535)
t.QueryPort = &p
if a.IP.To4() == nil {
@@ -34,7 +34,7 @@ func SetQueryAddress(t *tap.Message, addr net.Addr) error {
t.SocketProtocol = &protoUDP
t.QueryAddress = a.IP
- p := uint32(a.Port)
+ p := uint32(a.Port) // #nosec G115 -- Port is inherently bounded (0-65535)
t.QueryPort = &p
if a.IP.To4() == nil {
@@ -54,7 +54,7 @@ func SetResponseAddress(t *tap.Message, addr net.Addr) error {
t.SocketProtocol = &protoTCP
t.ResponseAddress = a.IP
- p := uint32(a.Port)
+ p := uint32(a.Port) // #nosec G115 -- Port is inherently bounded (0-65535)
t.ResponsePort = &p
if a.IP.To4() == nil {
@@ -65,7 +65,7 @@ func SetResponseAddress(t *tap.Message, addr net.Addr) error {
t.SocketProtocol = &protoUDP
t.ResponseAddress = a.IP
- p := uint32(a.Port)
+ p := uint32(a.Port) // #nosec G115 -- Port is inherently bounded (0-65535)
t.ResponsePort = &p
if a.IP.To4() == nil {
@@ -79,16 +79,16 @@ func SetResponseAddress(t *tap.Message, addr net.Addr) error {
// SetQueryTime sets the time of the query in t.
func SetQueryTime(t *tap.Message, ti time.Time) {
- qts := uint64(ti.Unix())
- qtn := uint32(ti.Nanosecond())
+ qts := uint64(ti.Unix()) // #nosec G115 -- Unix time fits in uint64
+ qtn := uint32(ti.Nanosecond()) // #nosec G115 -- Nanoseconds (0-999999999) fit in uint32
t.QueryTimeSec = &qts
t.QueryTimeNsec = &qtn
}
// SetResponseTime sets the time of the response in t.
func SetResponseTime(t *tap.Message, ti time.Time) {
- rts := uint64(ti.Unix())
- rtn := uint32(ti.Nanosecond())
+ rts := uint64(ti.Unix()) // #nosec G115 -- Unix time fits in uint64
+ rtn := uint32(ti.Nanosecond()) // #nosec G115 -- Nanoseconds (0-999999999) fit in uint32
t.ResponseTimeSec = &rts
t.ResponseTimeNsec = &rtn
}
diff --git a/vendor/github.com/coredns/coredns/plugin/errors/README.md b/vendor/github.com/coredns/coredns/plugin/errors/README.md
index 27ba1058a..ed91f602e 100644
--- a/vendor/github.com/coredns/coredns/plugin/errors/README.md
+++ b/vendor/github.com/coredns/coredns/plugin/errors/README.md
@@ -23,18 +23,33 @@ Extra knobs are available with an expanded syntax:
~~~
errors {
stacktrace
- consolidate DURATION REGEXP [LEVEL]
+ consolidate DURATION REGEXP [LEVEL] [show_first]
}
~~~
Option `stacktrace` will log a stacktrace during panic recovery.
-Option `consolidate` allows collecting several error messages matching the regular expression **REGEXP** during **DURATION**. After the **DURATION** since receiving the first such message, the consolidated message will be printed to standard output with
+Option `consolidate` allows collecting several error messages matching the regular expression **REGEXP** during **DURATION**. **REGEXP** must not exceed 10000 characters. After the **DURATION** since receiving the first such message, the consolidated message will be printed to standard output with
log level, which is configurable by optional option **LEVEL**. Supported options for **LEVEL** option are `warning`,`error`,`info` and `debug`.
~~~
2 errors like '^read udp .* i/o timeout$' occurred in last 30s
~~~
+If the optional `show_first` flag is specified, the first error will be logged immediately when it occurs, and then subsequent matching errors will be consolidated. When the consolidation period ends:
+- If only one error occurred, no summary is printed (since it was already logged)
+- If multiple errors occurred, a summary is printed showing the total count
+
+Example with 3 errors:
+~~~
+[WARNING] 2 example.org. A: read udp 10.0.0.1:53->8.8.8.8:53: i/o timeout
+[WARNING] 3 errors like '^read udp .* i/o timeout$' occurred in last 30s
+~~~
+
+Example with 1 error:
+~~~
+[WARNING] 2 example.org. A: read udp 10.0.0.1:53->8.8.8.8:53: i/o timeout
+~~~
+
Multiple `consolidate` options with different **DURATION** and **REGEXP** are allowed. In case if some error message corresponds to several defined regular expressions the message will be associated with the first appropriate **REGEXP**.
For better performance, it's recommended to use the `^` or `$` metacharacters in regular expression when filtering error messages by prefix or suffix, e.g. `^failed to .*`, or `.* timeout$`.
@@ -63,3 +78,16 @@ and errors with prefix "Failed to " as errors.
}
}
~~~
+
+Use the *forward* plugin and consolidate timeout errors with `show_first` option to see both
+the summary and the first occurrence of the error:
+
+~~~ corefile
+. {
+ forward . 8.8.8.8
+ errors {
+ consolidate 5m ".* i/o timeout$" warning show_first
+ consolidate 30s "^Failed to .+" error show_first
+ }
+}
+~~~
diff --git a/vendor/github.com/coredns/coredns/plugin/errors/errors.go b/vendor/github.com/coredns/coredns/plugin/errors/errors.go
index 213ec77d9..31127e41a 100644
--- a/vendor/github.com/coredns/coredns/plugin/errors/errors.go
+++ b/vendor/github.com/coredns/coredns/plugin/errors/errors.go
@@ -23,6 +23,7 @@ type pattern struct {
period time.Duration
pattern *regexp.Regexp
logCallback func(format string, v ...any)
+ showFirst bool
}
func (p *pattern) timer() *time.Timer {
@@ -46,17 +47,24 @@ func newErrorHandler() *errorHandler {
func (h *errorHandler) logPattern(i int) {
cnt := atomic.SwapUint32(&h.patterns[i].count, 0)
- if cnt > 0 {
+ if cnt == 0 {
+ return
+ }
+ if cnt > 1 || !h.patterns[i].showFirst {
h.patterns[i].logCallback("%d errors like '%s' occurred in last %s",
cnt, h.patterns[i].pattern.String(), h.patterns[i].period)
}
}
-func (h *errorHandler) inc(i int) bool {
+// consolidateError records an error occurrence for pattern i and reports
+// whether it was consolidated. It returns false when the handler is stopped,
+// or when this is the first error and showFirst is set, so the caller logs it.
+func (h *errorHandler) consolidateError(i int) bool {
if atomic.LoadUint32(&h.stopFlag) > 0 {
return false
}
- if atomic.AddUint32(&h.patterns[i].count, 1) == 1 {
+ cnt := atomic.AddUint32(&h.patterns[i].count, 1)
+ if cnt == 1 {
ind := i
t := time.AfterFunc(h.patterns[ind].period, func() {
h.logPattern(ind)
@@ -65,6 +73,9 @@ func (h *errorHandler) inc(i int) bool {
if atomic.LoadUint32(&h.stopFlag) > 0 && t.Stop() {
h.logPattern(ind)
}
+ // If showFirst is enabled, return false so the first error
+ // will be printed by the caller using the pattern's logCallback
+ return !h.patterns[i].showFirst
}
return true
}
@@ -85,16 +96,26 @@ func (h *errorHandler) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dn
if err != nil {
strErr := err.Error()
+ state := request.Request{W: w, Req: r}
+
+ // Default to error logging
+ logFunc := log.Errorf
+
for i := range h.patterns {
if h.patterns[i].pattern.MatchString(strErr) {
- if h.inc(i) {
+ if h.consolidateError(i) {
+ // Error is consolidated, no need to log
return rcode, err
}
+ // consolidateError returned false (showFirst case)
+ // Use the pattern's configured log level
+ logFunc = h.patterns[i].logCallback
break
}
}
- state := request.Request{W: w, Req: r}
- log.Errorf("%d %s %s: %s", rcode, state.Name(), state.Type(), strErr)
+
+ // Log with the appropriate log level
+ logFunc("%d %s %s: %s", rcode, state.Name(), state.Type(), strErr)
}
return rcode, err
diff --git a/vendor/github.com/coredns/coredns/plugin/errors/setup.go b/vendor/github.com/coredns/coredns/plugin/errors/setup.go
index 78b12f3a6..b9aa649ba 100644
--- a/vendor/github.com/coredns/coredns/plugin/errors/setup.go
+++ b/vendor/github.com/coredns/coredns/plugin/errors/setup.go
@@ -9,6 +9,10 @@ import (
"github.com/coredns/coredns/plugin"
)
+// maxRegexpLen is a hard limit on the length of a regex pattern to prevent
+// OOM during regex compilation with malicious input.
+const maxRegexpLen = 10000
+
func init() { plugin.Register("errors", setup) }
func setup(c *caddy.Controller) error {
@@ -71,39 +75,62 @@ func errorsParse(c *caddy.Controller) (*errorHandler, error) {
func parseConsolidate(c *caddy.Controller) (*pattern, error) {
args := c.RemainingArgs()
- if len(args) < 2 || len(args) > 3 {
+ if len(args) < 2 || len(args) > 4 {
return nil, c.ArgErr()
}
p, err := time.ParseDuration(args[0])
if err != nil {
return nil, c.Err(err.Error())
}
+ if len(args[1]) > maxRegexpLen {
+ return nil, c.Errf("regex pattern too long: %d > %d", len(args[1]), maxRegexpLen)
+ }
re, err := regexp.Compile(args[1])
if err != nil {
return nil, c.Err(err.Error())
}
- lc, err := parseLogLevel(c, args)
+
+ lc, showFirst, err := parseOptionalParams(c, args[2:])
if err != nil {
return nil, err
}
- return &pattern{period: p, pattern: re, logCallback: lc}, nil
+
+ return &pattern{period: p, pattern: re, logCallback: lc, showFirst: showFirst}, nil
}
-func parseLogLevel(c *caddy.Controller, args []string) (func(format string, v ...any), error) {
- if len(args) != 3 {
- return log.Errorf, nil
+// parseOptionalParams parses optional parameters (log level and show_first flag).
+// Order: log level (optional) must come before show_first (optional).
+func parseOptionalParams(c *caddy.Controller, args []string) (func(format string, v ...any), bool, error) {
+ logLevels := map[string]func(format string, v ...any){
+ "warning": log.Warningf,
+ "error": log.Errorf,
+ "info": log.Infof,
+ "debug": log.Debugf,
}
- switch args[2] {
- case "warning":
- return log.Warningf, nil
- case "error":
- return log.Errorf, nil
- case "info":
- return log.Infof, nil
- case "debug":
- return log.Debugf, nil
- default:
- return nil, c.Errf("unknown log level argument in consolidate: %s", args[2])
+ var logCallback func(format string, v ...any) // nil means not set yet
+ showFirst := false
+
+ for _, arg := range args {
+ if callback, isLogLevel := logLevels[arg]; isLogLevel {
+ if logCallback != nil {
+ return nil, false, c.Errf("multiple log levels specified in consolidate")
+ }
+ if showFirst {
+ return nil, false, c.Errf("log level must come before show_first in consolidate")
+ }
+ logCallback = callback
+ } else if arg == "show_first" {
+ showFirst = true
+ } else {
+ return nil, false, c.Errf("unknown option in consolidate: %s", arg)
+ }
}
+
+ // Use default log level if not specified
+ if logCallback == nil {
+ logCallback = log.Errorf
+ }
+
+ return logCallback, showFirst, nil
}
diff --git a/vendor/github.com/coredns/coredns/plugin/etcd/msg/service.go b/vendor/github.com/coredns/coredns/plugin/etcd/msg/service.go
index e653d07d7..f3ac45ffa 100644
--- a/vendor/github.com/coredns/coredns/plugin/etcd/msg/service.go
+++ b/vendor/github.com/coredns/coredns/plugin/etcd/msg/service.go
@@ -44,7 +44,7 @@ func (s *Service) NewSRV(name string, weight uint16) *dns.SRV {
}
return &dns.SRV{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: s.TTL},
- Priority: uint16(s.Priority), Weight: weight, Port: uint16(s.Port), Target: host}
+ Priority: uint16(s.Priority), Weight: weight, Port: uint16(s.Port), Target: host} // #nosec G115 -- Priority and Port fit in uint16
}
// NewMX returns a new MX record based on the Service.
@@ -55,7 +55,7 @@ func (s *Service) NewMX(name string) *dns.MX {
}
return &dns.MX{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: s.TTL},
- Preference: uint16(s.Priority), Mx: host}
+ Preference: uint16(s.Priority), Mx: host} // #nosec G115 -- MX preference fits in uint16
}
// NewA returns a new A record based on the Service.
diff --git a/vendor/github.com/coredns/coredns/plugin/forward/README.md b/vendor/github.com/coredns/coredns/plugin/forward/README.md
index 436b6c2e5..44679f091 100644
--- a/vendor/github.com/coredns/coredns/plugin/forward/README.md
+++ b/vendor/github.com/coredns/coredns/plugin/forward/README.md
@@ -44,6 +44,7 @@ forward FROM TO... {
force_tcp
prefer_udp
expire DURATION
+ max_idle_conns INTEGER
max_fails INTEGER
max_connect_attempts INTEGER
tls CERT KEY CA
@@ -71,6 +72,8 @@ forward FROM TO... {
performed for a single incoming DNS request. Default value of 0 means no per-request
cap.
* `expire` **DURATION**, expire (cached) connections after this time, the default is 10s.
+* `max_idle_conns` **INTEGER**, maximum number of idle connections to cache per upstream for reuse.
+ Default is 0, which means unlimited.
* `tls` **CERT** **KEY** **CA** define the TLS properties for TLS connection. From 0 to 3 arguments can be
provided with the meaning as described below
diff --git a/vendor/github.com/coredns/coredns/plugin/forward/forward.go b/vendor/github.com/coredns/coredns/plugin/forward/forward.go
index 449579e5a..306519dc2 100644
--- a/vendor/github.com/coredns/coredns/plugin/forward/forward.go
+++ b/vendor/github.com/coredns/coredns/plugin/forward/forward.go
@@ -49,6 +49,7 @@ type Forward struct {
tlsServerName string
maxfails uint32
expire time.Duration
+ maxIdleConns int
maxConcurrent int64
failfastUnhealthyUpstreams bool
failoverRcodes []int
diff --git a/vendor/github.com/coredns/coredns/plugin/forward/policy.go b/vendor/github.com/coredns/coredns/plugin/forward/policy.go
index 7bd1f316a..3b62b1b49 100644
--- a/vendor/github.com/coredns/coredns/plugin/forward/policy.go
+++ b/vendor/github.com/coredns/coredns/plugin/forward/policy.go
@@ -47,7 +47,7 @@ type roundRobin struct {
func (r *roundRobin) String() string { return "round_robin" }
func (r *roundRobin) List(p []*proxy.Proxy) []*proxy.Proxy {
- poolLen := uint32(len(p))
+ poolLen := uint32(len(p)) // #nosec G115 -- pool length is small
i := atomic.AddUint32(&r.robin, 1) % poolLen
robin := []*proxy.Proxy{p[i]}
diff --git a/vendor/github.com/coredns/coredns/plugin/forward/setup.go b/vendor/github.com/coredns/coredns/plugin/forward/setup.go
index 6469bfad2..6f17882f9 100644
--- a/vendor/github.com/coredns/coredns/plugin/forward/setup.go
+++ b/vendor/github.com/coredns/coredns/plugin/forward/setup.go
@@ -196,6 +196,7 @@ func parseStanza(c *caddy.Controller) (*Forward, error) {
}
}
f.proxies[i].SetExpire(f.expire)
+ f.proxies[i].SetMaxIdleConns(f.maxIdleConns)
f.proxies[i].GetHealthchecker().SetRecursionDesired(f.opts.HCRecursionDesired)
// when TLS is used, checks are set to tcp-tls
if f.opts.ForceTCP && transports[i] != transport.TLS {
@@ -311,6 +312,18 @@ func parseBlock(c *caddy.Controller, f *Forward) error {
return fmt.Errorf("expire can't be negative: %s", dur)
}
f.expire = dur
+ case "max_idle_conns":
+ if !c.NextArg() {
+ return c.ArgErr()
+ }
+ n, err := strconv.Atoi(c.Val())
+ if err != nil {
+ return err
+ }
+ if n < 0 {
+ return fmt.Errorf("max_idle_conns can't be negative: %d", n)
+ }
+ f.maxIdleConns = n
case "policy":
if !c.NextArg() {
return c.ArgErr()
diff --git a/vendor/github.com/coredns/coredns/plugin/health/health.go b/vendor/github.com/coredns/coredns/plugin/health/health.go
index 980cf2bc8..046166012 100644
--- a/vendor/github.com/coredns/coredns/plugin/health/health.go
+++ b/vendor/github.com/coredns/coredns/plugin/health/health.go
@@ -22,12 +22,15 @@ type health struct {
healthURI *url.URL
ln net.Listener
+ srv *http.Server
nlSetup bool
mux *http.ServeMux
stop context.CancelFunc
}
+const shutdownTimeout = 5 * time.Second
+
func (h *health) OnStartup() error {
if h.Addr == "" {
h.Addr = ":8080"
@@ -63,7 +66,14 @@ func (h *health) OnStartup() error {
ctx := context.Background()
ctx, h.stop = context.WithCancel(ctx)
- go func() { http.Serve(h.ln, h.mux) }()
+ h.srv = &http.Server{
+ Handler: h.mux,
+ ReadTimeout: 5 * time.Second,
+ WriteTimeout: 5 * time.Second,
+ IdleTimeout: 5 * time.Second,
+ }
+
+ go func() { h.srv.Serve(h.ln) }()
go func() { h.overloaded(ctx) }()
return nil
@@ -81,7 +91,11 @@ func (h *health) OnFinalShutdown() error {
h.stop()
- h.ln.Close()
+ ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
+ defer cancel()
+ if err := h.srv.Shutdown(ctx); err != nil {
+ log.Infof("Failed to stop health http server: %s", err)
+ }
h.nlSetup = false
return nil
}
@@ -93,7 +107,11 @@ func (h *health) OnReload() error {
h.stop()
- h.ln.Close()
+ ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
+ defer cancel()
+ if err := h.srv.Shutdown(ctx); err != nil {
+ log.Infof("Failed to stop health http server: %s", err)
+ }
h.nlSetup = false
return nil
}
diff --git a/vendor/github.com/coredns/coredns/plugin/loadbalance/weighted.go b/vendor/github.com/coredns/coredns/plugin/loadbalance/weighted.go
index 275af6f88..791d0b345 100644
--- a/vendor/github.com/coredns/coredns/plugin/loadbalance/weighted.go
+++ b/vendor/github.com/coredns/coredns/plugin/loadbalance/weighted.go
@@ -3,7 +3,7 @@ package loadbalance
import (
"bufio"
"bytes"
- "crypto/md5"
+ "crypto/md5" // #nosec G501 -- used only as a checksum for file change detection (not for security).
"errors"
"fmt"
"io"
@@ -52,11 +52,11 @@ type randomUint struct {
}
func (r *randomUint) randInit() {
- r.rn = rand.New(rand.NewSource(time.Now().UnixNano()))
+ r.rn = rand.New(rand.NewSource(time.Now().UnixNano())) // #nosec G404 -- non-cryptographic randomness for load balancing.
}
func (r *randomUint) randUint(limit uint) uint {
- return uint(r.rn.Intn(int(limit)))
+ return uint(r.rn.Intn(int(limit))) // #nosec G115 -- limit is bounded by RR count
}
func weightedShuffle(res *dns.Msg, w *weightedRR) *dns.Msg {
@@ -245,7 +245,7 @@ func (w *weightedRR) updateWeights() error {
if err != nil {
return err
}
- md5sum := md5.Sum(bytes)
+ md5sum := md5.Sum(bytes) // #nosec G401 -- used only as a checksum for file change detection (not for security).
if md5sum == w.md5sum {
// file contents has not changed
return nil
diff --git a/vendor/github.com/coredns/coredns/plugin/metrics/handler.go b/vendor/github.com/coredns/coredns/plugin/metrics/handler.go
index fb350a2f5..4ac0e0ecc 100644
--- a/vendor/github.com/coredns/coredns/plugin/metrics/handler.go
+++ b/vendor/github.com/coredns/coredns/plugin/metrics/handler.go
@@ -2,7 +2,6 @@ package metrics
import (
"context"
- "path/filepath"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics/vars"
@@ -36,9 +35,9 @@ func (m *Metrics) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg
// see https://github.com/coredns/coredns/blob/master/core/dnsserver/server.go#L318
rc = status
}
- plugin := m.authoritativePlugin(rw.Caller)
// Pass the original request size to vars.Report
- vars.Report(WithServer(ctx), state, zone, WithView(ctx), rcode.ToString(rc), plugin,
+ // rw.Plugin is set automatically by the plugin chain via the PluginTracker interface
+ vars.Report(WithServer(ctx), state, zone, WithView(ctx), rcode.ToString(rc), rw.Plugin,
rw.Len, rw.Start, vars.WithOriginalReqSize(originalSize))
return status, err
@@ -46,17 +45,3 @@ func (m *Metrics) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg
// Name implements the Handler interface.
func (m *Metrics) Name() string { return "prometheus" }
-
-// authoritativePlugin returns which of made the write, if none is found the empty string is returned.
-func (m *Metrics) authoritativePlugin(caller [3]string) string {
- // a b and c contain the full path of the caller, the plugin name 2nd last elements
- // .../coredns/plugin/whoami/whoami.go --> whoami
- // this is likely FS specific, so use filepath.
- for _, c := range caller {
- plug := filepath.Base(filepath.Dir(c))
- if _, ok := m.plugins[plug]; ok {
- return plug
- }
- }
- return ""
-}
diff --git a/vendor/github.com/coredns/coredns/plugin/metrics/recorder.go b/vendor/github.com/coredns/coredns/plugin/metrics/recorder.go
index d4d42ba5c..c11ceb8ec 100644
--- a/vendor/github.com/coredns/coredns/plugin/metrics/recorder.go
+++ b/vendor/github.com/coredns/coredns/plugin/metrics/recorder.go
@@ -1,8 +1,6 @@
package metrics
import (
- "runtime"
-
"github.com/coredns/coredns/plugin/pkg/dnstest"
"github.com/miekg/dns"
@@ -11,8 +9,9 @@ import (
// Recorder is a dnstest.Recorder specific to the metrics plugin.
type Recorder struct {
*dnstest.Recorder
- // CallerN holds the string return value of the call to runtime.Caller(N+1)
- Caller [3]string
+ // Plugin holds the name of the plugin that wrote the response.
+ // This is set automatically by the plugin chain via the PluginTracker interface.
+ Plugin string
}
// NewRecorder makes and returns a new Recorder.
@@ -21,8 +20,15 @@ func NewRecorder(w dns.ResponseWriter) *Recorder { return &Recorder{Recorder: dn
// WriteMsg records the status code and calls the
// underlying ResponseWriter's WriteMsg method.
func (r *Recorder) WriteMsg(res *dns.Msg) error {
- _, r.Caller[0], _, _ = runtime.Caller(1)
- _, r.Caller[1], _, _ = runtime.Caller(2)
- _, r.Caller[2], _, _ = runtime.Caller(3)
return r.Recorder.WriteMsg(res)
}
+
+// SetPlugin implements the plugin.PluginTracker interface.
+func (r *Recorder) SetPlugin(name string) {
+ r.Plugin = name
+}
+
+// GetPlugin implements the plugin.PluginTracker interface.
+func (r *Recorder) GetPlugin() string {
+ return r.Plugin
+}
diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/proxy/connect.go b/vendor/github.com/coredns/coredns/plugin/pkg/proxy/connect.go
index 8445d18a7..4026cfbdd 100644
--- a/vendor/github.com/coredns/coredns/plugin/pkg/proxy/connect.go
+++ b/vendor/github.com/coredns/coredns/plugin/pkg/proxy/connect.go
@@ -1,7 +1,6 @@
-// Package proxy implements a forwarding proxy. It caches an upstream net.Conn for some time, so if the same
-// client returns the upstream's Conn will be precached. Depending on how you benchmark this looks to be
-// 50% faster than just opening a new connection for every client. It works with UDP and TCP and uses
-// inband healthchecking.
+// Package proxy implements a forwarding proxy with connection caching.
+// It manages a pool of upstream connections (UDP and TCP) to reuse them for subsequent requests,
+// reducing latency and handshake overhead. It supports in-band health checking.
package proxy
import (
@@ -19,10 +18,7 @@ import (
)
const (
- ErrTransportStopped = "proxy: transport stopped"
- ErrTransportStoppedDuringDial = "proxy: transport stopped during dial"
- ErrTransportStoppedRetClosed = "proxy: transport stopped, ret channel closed"
- ErrTransportStoppedDuringRetWait = "proxy: transport stopped during ret wait"
+ ErrTransportStopped = "proxy: transport stopped"
)
// limitTimeout is a utility function to auto-tune timeout values
@@ -66,41 +62,35 @@ func (t *Transport) Dial(proto string) (*persistConn, bool, error) {
default:
}
- // Use select to avoid blocking if connManager has stopped
- select {
- case t.dial <- proto:
- // Successfully sent dial request
- case <-t.stop:
- return nil, false, errors.New(ErrTransportStoppedDuringDial)
- }
+ transtype := stringToTransportType(proto)
- // Receive response with stop awareness
- select {
- case pc, ok := <-t.ret:
- if !ok {
- // ret channel was closed by connManager during stop
- return nil, false, errors.New(ErrTransportStoppedRetClosed)
+ t.mu.Lock()
+ // FIFO: take the oldest conn (front of slice) for source port diversity
+ for len(t.conns[transtype]) > 0 {
+ pc := t.conns[transtype][0]
+ t.conns[transtype] = t.conns[transtype][1:]
+ if time.Since(pc.used) > t.expire {
+ pc.c.Close()
+ continue
}
+ t.mu.Unlock()
+ connCacheHitsCount.WithLabelValues(t.proxyName, t.addr, proto).Add(1)
+ return pc, true, nil
+ }
+ t.mu.Unlock()
- if pc != nil {
- connCacheHitsCount.WithLabelValues(t.proxyName, t.addr, proto).Add(1)
- return pc, true, nil
- }
- connCacheMissesCount.WithLabelValues(t.proxyName, t.addr, proto).Add(1)
-
- reqTime := time.Now()
- timeout := t.dialTimeout()
- if proto == "tcp-tls" {
- conn, err := dns.DialTimeoutWithTLS("tcp", t.addr, t.tlsConfig, timeout)
- t.updateDialTimeout(time.Since(reqTime))
- return &persistConn{c: conn}, false, err
- }
- conn, err := dns.DialTimeout(proto, t.addr, timeout)
+ connCacheMissesCount.WithLabelValues(t.proxyName, t.addr, proto).Add(1)
+
+ reqTime := time.Now()
+ timeout := t.dialTimeout()
+ if proto == "tcp-tls" {
+ conn, err := dns.DialTimeoutWithTLS("tcp", t.addr, t.tlsConfig, timeout)
t.updateDialTimeout(time.Since(reqTime))
return &persistConn{c: conn}, false, err
- case <-t.stop:
- return nil, false, errors.New(ErrTransportStoppedDuringRetWait)
}
+ conn, err := dns.DialTimeout(proto, t.addr, timeout)
+ t.updateDialTimeout(time.Since(reqTime))
+ return &persistConn{c: conn}, false, err
}
// Connect selects an upstream, sends the request and waits for a response.
@@ -123,7 +113,7 @@ func (p *Proxy) Connect(ctx context.Context, state request.Request, opts Options
}
// Set buffer size correctly for this client.
- pc.c.UDPSize = max(uint16(state.Size()), 512)
+ pc.c.UDPSize = max(uint16(state.Size()), 512) // #nosec G115 -- UDP size fits in uint16
pc.c.SetWriteDeadline(time.Now().Add(maxTimeout))
// records the origin Id before upstream.
diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/proxy/persistent.go b/vendor/github.com/coredns/coredns/plugin/pkg/proxy/persistent.go
index 0bacc851a..da2dca122 100644
--- a/vendor/github.com/coredns/coredns/plugin/pkg/proxy/persistent.go
+++ b/vendor/github.com/coredns/coredns/plugin/pkg/proxy/persistent.go
@@ -3,6 +3,7 @@ package proxy
import (
"crypto/tls"
"sort"
+ "sync"
"time"
"github.com/miekg/dns"
@@ -16,17 +17,16 @@ type persistConn struct {
// Transport hold the persistent cache.
type Transport struct {
- avgDialTime int64 // kind of average time of dial time
- conns [typeTotalCount][]*persistConn // Buckets for udp, tcp and tcp-tls.
- expire time.Duration // After this duration a connection is expired.
- addr string
- tlsConfig *tls.Config
- proxyName string
-
- dial chan string
- yield chan *persistConn
- ret chan *persistConn
- stop chan bool
+ avgDialTime int64 // kind of average time of dial time
+ conns [typeTotalCount][]*persistConn // Buckets for udp, tcp and tcp-tls.
+ expire time.Duration // After this duration a connection is expired.
+ maxIdleConns int // Max idle connections per transport type; 0 means unlimited.
+ addr string
+ tlsConfig *tls.Config
+ proxyName string
+
+ mu sync.Mutex
+ stop chan struct{}
}
func newTransport(proxyName, addr string) *Transport {
@@ -35,10 +35,7 @@ func newTransport(proxyName, addr string) *Transport {
conns: [typeTotalCount][]*persistConn{},
expire: defaultExpire,
addr: addr,
- dial: make(chan string),
- yield: make(chan *persistConn),
- ret: make(chan *persistConn),
- stop: make(chan bool),
+ stop: make(chan struct{}),
proxyName: proxyName,
}
return t
@@ -48,38 +45,12 @@ func newTransport(proxyName, addr string) *Transport {
func (t *Transport) connManager() {
ticker := time.NewTicker(defaultExpire)
defer ticker.Stop()
-Wait:
for {
select {
- case proto := <-t.dial:
- transtype := stringToTransportType(proto)
- // take the last used conn - complexity O(1)
- if stack := t.conns[transtype]; len(stack) > 0 {
- pc := stack[len(stack)-1]
- if time.Since(pc.used) < t.expire {
- // Found one, remove from pool and return this conn.
- t.conns[transtype] = stack[:len(stack)-1]
- t.ret <- pc
- continue Wait
- }
- // clear entire cache if the last conn is expired
- t.conns[transtype] = nil
- // now, the connections being passed to closeConns() are not reachable from
- // transport methods anymore. So, it's safe to close them in a separate goroutine
- go closeConns(stack)
- }
- t.ret <- nil
-
- case pc := <-t.yield:
- transtype := t.transportTypeFromConn(pc)
- t.conns[transtype] = append(t.conns[transtype], pc)
-
case <-ticker.C:
t.cleanup(false)
-
case <-t.stop:
t.cleanup(true)
- close(t.ret)
return
}
}
@@ -94,6 +65,9 @@ func closeConns(conns []*persistConn) {
// cleanup removes connections from cache.
func (t *Transport) cleanup(all bool) {
+ var toClose []*persistConn
+
+ t.mu.Lock()
staleTime := time.Now().Add(-t.expire)
for transtype, stack := range t.conns {
if len(stack) == 0 {
@@ -101,9 +75,7 @@ func (t *Transport) cleanup(all bool) {
}
if all {
t.conns[transtype] = nil
- // now, the connections being passed to closeConns() are not reachable from
- // transport methods anymore. So, it's safe to close them in a separate goroutine
- go closeConns(stack)
+ toClose = append(toClose, stack...)
continue
}
if stack[0].used.After(staleTime) {
@@ -115,34 +87,38 @@ func (t *Transport) cleanup(all bool) {
return stack[i].used.After(staleTime)
})
t.conns[transtype] = stack[good:]
- // now, the connections being passed to closeConns() are not reachable from
- // transport methods anymore. So, it's safe to close them in a separate goroutine
- go closeConns(stack[:good])
+ toClose = append(toClose, stack[:good]...)
}
-}
+ t.mu.Unlock()
-// It is hard to pin a value to this, the import thing is to no block forever, losing at cached connection is not terrible.
-const yieldTimeout = 25 * time.Millisecond
+ // Close the collected connections after releasing the lock, so the lock is not held during Close I/O.
+ closeConns(toClose)
+}
// Yield returns the connection to transport for reuse.
func (t *Transport) Yield(pc *persistConn) {
- pc.used = time.Now() // update used time
-
- // Optimization: Try to return the connection immediately without creating a timer.
- // If the receiver is not ready, we fall back to a timeout-based send to avoid blocking forever.
- // Returning the connection is just an optimization, so dropping it on timeout is fine.
+ // Check whether the transport has been stopped before taking the lock; a stopped transport must not pool connections.
select {
- case t.yield <- pc:
+ case <-t.stop:
+ // If stopped, don't return to pool, just close
+ pc.c.Close()
return
default:
}
- select {
- case t.yield <- pc:
- return
- case <-time.After(yieldTimeout):
+ pc.used = time.Now() // update used time
+
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ transtype := t.transportTypeFromConn(pc)
+
+ if t.maxIdleConns > 0 && len(t.conns[transtype]) >= t.maxIdleConns {
+ pc.c.Close()
return
}
+
+ t.conns[transtype] = append(t.conns[transtype], pc)
}
// Start starts the transport's connection manager.
@@ -154,6 +130,10 @@ func (t *Transport) Stop() { close(t.stop) }
// SetExpire sets the connection expire time in transport.
func (t *Transport) SetExpire(expire time.Duration) { t.expire = expire }
+// SetMaxIdleConns sets the maximum idle connections per transport type.
+// A value of 0 means unlimited (default).
+func (t *Transport) SetMaxIdleConns(n int) { t.maxIdleConns = n }
+
// SetTLSConfig sets the TLS config in transport.
func (t *Transport) SetTLSConfig(cfg *tls.Config) { t.tlsConfig = cfg }
diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/proxy/proxy.go b/vendor/github.com/coredns/coredns/plugin/pkg/proxy/proxy.go
index 35e94bf83..6c460c397 100644
--- a/vendor/github.com/coredns/coredns/plugin/pkg/proxy/proxy.go
+++ b/vendor/github.com/coredns/coredns/plugin/pkg/proxy/proxy.go
@@ -52,6 +52,10 @@ func (p *Proxy) SetTLSConfig(cfg *tls.Config) {
// SetExpire sets the expire duration in the lower p.transport.
func (p *Proxy) SetExpire(expire time.Duration) { p.transport.SetExpire(expire) }
+// SetMaxIdleConns sets the maximum idle connections per transport type.
+// A value of 0 means unlimited (default).
+func (p *Proxy) SetMaxIdleConns(n int) { p.transport.SetMaxIdleConns(n) }
+
func (p *Proxy) GetHealthchecker() HealthChecker {
return p.health
}
diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/rand/rand.go b/vendor/github.com/coredns/coredns/plugin/pkg/rand/rand.go
index f7332adf5..de20d771d 100644
--- a/vendor/github.com/coredns/coredns/plugin/pkg/rand/rand.go
+++ b/vendor/github.com/coredns/coredns/plugin/pkg/rand/rand.go
@@ -17,7 +17,7 @@ type Rand struct {
// New returns a new Rand from seed.
func New(seed int64) *Rand {
- return &Rand{r: rand.New(rand.NewSource(seed))}
+ return &Rand{r: rand.New(rand.NewSource(seed))} // #nosec G404 -- non-cryptographic RNG by design (load balancing only).
}
// Int returns a non-negative pseudo-random int from the Source in Rand.r.
diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/tls/tls.go b/vendor/github.com/coredns/coredns/plugin/pkg/tls/tls.go
index 41eff4bc0..a2a955f05 100644
--- a/vendor/github.com/coredns/coredns/plugin/pkg/tls/tls.go
+++ b/vendor/github.com/coredns/coredns/plugin/pkg/tls/tls.go
@@ -95,7 +95,11 @@ func NewTLSConfig(certPath, keyPath, caPath string) (*tls.Config, error) {
return nil, err
}
- tlsConfig := &tls.Config{Certificates: []tls.Certificate{cert}, RootCAs: roots}
+ // #nosec G402 -- MinVersion and MaxVersion are set in setTLSDefaults
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: roots,
+ }
setTLSDefaults(tlsConfig)
return tlsConfig, nil
@@ -109,7 +113,10 @@ func NewTLSClientConfig(caPath string) (*tls.Config, error) {
return nil, err
}
- tlsConfig := &tls.Config{RootCAs: roots}
+ // #nosec G402 -- MinVersion and MaxVersion are set in setTLSDefaults
+ tlsConfig := &tls.Config{
+ RootCAs: roots,
+ }
setTLSDefaults(tlsConfig)
return tlsConfig, nil
diff --git a/vendor/github.com/coredns/coredns/plugin/plugin.go b/vendor/github.com/coredns/coredns/plugin/plugin.go
index 43c1e6547..2cb55e64d 100644
--- a/vendor/github.com/coredns/coredns/plugin/plugin.go
+++ b/vendor/github.com/coredns/coredns/plugin/plugin.go
@@ -5,6 +5,7 @@ import (
"context"
"errors"
"fmt"
+ "net"
"github.com/miekg/dns"
ot "github.com/opentracing/opentracing-go"
@@ -77,12 +78,60 @@ func NextOrFailure(name string, next Handler, ctx context.Context, w dns.Respons
defer child.Finish()
ctx = ot.ContextWithSpan(ctx, child)
}
- return next.ServeDNS(ctx, w, r)
+ // Wrap the ResponseWriter to track which plugin writes the response
+ pw := &pluginWriter{ResponseWriter: w, plugin: next.Name()}
+ return next.ServeDNS(ctx, pw, r)
}
return dns.RcodeServerFailure, Error(name, errors.New("no next plugin found"))
}
+// PluginTracker is an interface for ResponseWriters that track which plugin wrote the response.
+type PluginTracker interface {
+ SetPlugin(name string)
+ GetPlugin() string
+}
+
+// pluginWriter wraps a dns.ResponseWriter to track which plugin writes the response.
+type pluginWriter struct {
+ dns.ResponseWriter
+ plugin string
+}
+
+// WriteMsg implements dns.ResponseWriter and tracks the plugin that wrote the response.
+func (pw *pluginWriter) WriteMsg(m *dns.Msg) error {
+ if tracker, ok := pw.ResponseWriter.(PluginTracker); ok {
+ tracker.SetPlugin(pw.plugin)
+ }
+ return pw.ResponseWriter.WriteMsg(m)
+}
+
+// Write implements dns.ResponseWriter.
+func (pw *pluginWriter) Write(b []byte) (int, error) {
+ if tracker, ok := pw.ResponseWriter.(PluginTracker); ok {
+ tracker.SetPlugin(pw.plugin)
+ }
+ return pw.ResponseWriter.Write(b)
+}
+
+// LocalAddr implements dns.ResponseWriter.
+func (pw *pluginWriter) LocalAddr() net.Addr { return pw.ResponseWriter.LocalAddr() }
+
+// RemoteAddr implements dns.ResponseWriter.
+func (pw *pluginWriter) RemoteAddr() net.Addr { return pw.ResponseWriter.RemoteAddr() }
+
+// Close implements dns.ResponseWriter.
+func (pw *pluginWriter) Close() error { return pw.ResponseWriter.Close() }
+
+// TsigStatus implements dns.ResponseWriter.
+func (pw *pluginWriter) TsigStatus() error { return pw.ResponseWriter.TsigStatus() }
+
+// TsigTimersOnly implements dns.ResponseWriter.
+func (pw *pluginWriter) TsigTimersOnly(b bool) { pw.ResponseWriter.TsigTimersOnly(b) }
+
+// Hijack implements dns.ResponseWriter.
+func (pw *pluginWriter) Hijack() { pw.ResponseWriter.Hijack() }
+
// ClientWrite returns true if the response has been written to the client.
// Each plugin to adhere to this protocol.
func ClientWrite(rcode int) bool {
diff --git a/vendor/github.com/coredns/coredns/plugin/pprof/pprof.go b/vendor/github.com/coredns/coredns/plugin/pprof/pprof.go
index 822e6e222..14a43bd14 100644
--- a/vendor/github.com/coredns/coredns/plugin/pprof/pprof.go
+++ b/vendor/github.com/coredns/coredns/plugin/pprof/pprof.go
@@ -3,10 +3,12 @@
package pprof
import (
+ "context"
"net"
"net/http"
pp "net/http/pprof"
"runtime"
+ "time"
"github.com/coredns/coredns/plugin/pkg/reuseport"
)
@@ -15,9 +17,12 @@ type handler struct {
addr string
rateBloc int
ln net.Listener
+ srv *http.Server
mux *http.ServeMux
}
+const shutdownTimeout = 5 * time.Second
+
func (h *handler) Startup() error {
// Reloading the plugin without changing the listening address results
// in an error unless we reuse the port because Startup is called for
@@ -42,15 +47,25 @@ func (h *handler) Startup() error {
runtime.SetBlockProfileRate(h.rateBloc)
- go func() {
- http.Serve(h.ln, h.mux)
- }()
+ h.srv = &http.Server{
+ Handler: h.mux,
+ ReadTimeout: 5 * time.Second,
+ WriteTimeout: 5 * time.Second,
+ IdleTimeout: 5 * time.Second,
+ }
+
+ go func() { h.srv.Serve(h.ln) }()
return nil
}
func (h *handler) Shutdown() error {
- if h.ln != nil {
- return h.ln.Close()
+ if h.srv != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
+ defer cancel()
+ if err := h.srv.Shutdown(ctx); err != nil {
+ log.Infof("Failed to stop pprof http server: %s", err)
+ return err
+ }
}
return nil
}
diff --git a/vendor/github.com/coredns/coredns/plugin/reload/setup.go b/vendor/github.com/coredns/coredns/plugin/reload/setup.go
index 0cbecc6a4..65cd68149 100644
--- a/vendor/github.com/coredns/coredns/plugin/reload/setup.go
+++ b/vendor/github.com/coredns/coredns/plugin/reload/setup.go
@@ -60,7 +60,7 @@ func setup(c *caddy.Controller) error {
j = i / 2
}
- jitter := time.Duration(rand.Int63n(j.Nanoseconds()) - (j.Nanoseconds() / 2))
+ jitter := time.Duration(rand.Int63n(j.Nanoseconds()) - (j.Nanoseconds() / 2)) // #nosec G404 -- non-cryptographic jitter.
i = i + jitter
// prepare info for next onInstanceStartup event
diff --git a/vendor/github.com/coredns/coredns/plugin/rewrite/README.md b/vendor/github.com/coredns/coredns/plugin/rewrite/README.md
index f5e4c0332..5f4d393de 100644
--- a/vendor/github.com/coredns/coredns/plugin/rewrite/README.md
+++ b/vendor/github.com/coredns/coredns/plugin/rewrite/README.md
@@ -74,7 +74,8 @@ The match type, e.g., `exact`, `substring`, etc., triggers rewrite:
* **substring**: on a partial match of the name in the question section of a request
* **prefix**: when the name begins with the matching string
* **suffix**: when the name ends with the matching string
-* **regex**: when the name in the question section of a request matches a regular expression
+* **regex**: when the name in the question section of a request matches a regular expression.
+ Regex patterns must not exceed 10000 characters.
If the match type is omitted, the `exact` match type is assumed. If OPTIONS are
given, the type must be specified.
diff --git a/vendor/github.com/coredns/coredns/plugin/rewrite/cname_target.go b/vendor/github.com/coredns/coredns/plugin/rewrite/cname_target.go
index 46d501801..57ee451b2 100644
--- a/vendor/github.com/coredns/coredns/plugin/rewrite/cname_target.go
+++ b/vendor/github.com/coredns/coredns/plugin/rewrite/cname_target.go
@@ -144,6 +144,9 @@ func newCNAMERule(nextAction string, args ...string) (Rule, error) {
Upstream: upstream.New(),
}
if rewriteType == RegexMatch {
+ if len(paramFromTarget) > maxRegexpLen {
+ return nil, fmt.Errorf("regex pattern too long in a cname rule: %d > %d", len(paramFromTarget), maxRegexpLen)
+ }
re, err := regexp.Compile(paramFromTarget)
if err != nil {
return nil, fmt.Errorf("invalid cname rewrite regex pattern: %w", err)
diff --git a/vendor/github.com/coredns/coredns/plugin/rewrite/name.go b/vendor/github.com/coredns/coredns/plugin/rewrite/name.go
index 65d80d0f0..ab4200352 100644
--- a/vendor/github.com/coredns/coredns/plugin/rewrite/name.go
+++ b/vendor/github.com/coredns/coredns/plugin/rewrite/name.go
@@ -13,6 +13,10 @@ import (
"github.com/miekg/dns"
)
+// maxRegexpLen is a hard limit on the length of a regex pattern to prevent
+// OOM during regex compilation with malicious input.
+const maxRegexpLen = 10000
+
// stringRewriter rewrites a string
type stringRewriter interface {
rewriteString(src string) string
@@ -438,6 +442,9 @@ func getSubExprUsage(s string) int {
// isValidRegexPattern returns a regular expression for pattern matching or errors, if any.
func isValidRegexPattern(rewriteFrom, rewriteTo string) (*regexp.Regexp, error) {
+ if len(rewriteFrom) > maxRegexpLen {
+ return nil, fmt.Errorf("regex pattern too long: %d > %d", len(rewriteFrom), maxRegexpLen)
+ }
rewriteFromPattern, err := regexp.Compile(rewriteFrom)
if err != nil {
return nil, fmt.Errorf("invalid regex matching pattern: %s", rewriteFrom)
diff --git a/vendor/github.com/coredns/coredns/plugin/rewrite/rcode.go b/vendor/github.com/coredns/coredns/plugin/rewrite/rcode.go
index cf12a8c26..3a2a8d30b 100644
--- a/vendor/github.com/coredns/coredns/plugin/rewrite/rcode.go
+++ b/vendor/github.com/coredns/coredns/plugin/rewrite/rcode.go
@@ -142,6 +142,9 @@ func newRCodeRule(nextAction string, args ...string) (Rule, error) {
plugin.Name(args[1]).Normalize(),
}, nil
case RegexMatch:
+ if len(args[1]) > maxRegexpLen {
+ return nil, fmt.Errorf("regex pattern too long in a rcode rule: %d > %d", len(args[1]), maxRegexpLen)
+ }
regexPattern, err := regexp.Compile(args[1])
if err != nil {
return nil, fmt.Errorf("invalid regex pattern in a rcode rule: %s", args[1])
diff --git a/vendor/github.com/coredns/coredns/plugin/rewrite/ttl.go b/vendor/github.com/coredns/coredns/plugin/rewrite/ttl.go
index 5430fc923..f33a7f0dc 100644
--- a/vendor/github.com/coredns/coredns/plugin/rewrite/ttl.go
+++ b/vendor/github.com/coredns/coredns/plugin/rewrite/ttl.go
@@ -140,6 +140,9 @@ func newTTLRule(nextAction string, args ...string) (Rule, error) {
plugin.Name(args[1]).Normalize(),
}, nil
case RegexMatch:
+ if len(args[1]) > maxRegexpLen {
+ return nil, fmt.Errorf("regex pattern too long in a ttl rule: %d > %d", len(args[1]), maxRegexpLen)
+ }
regexPattern, err := regexp.Compile(args[1])
if err != nil {
return nil, fmt.Errorf("invalid regex pattern in a ttl rule: %s", args[1])
@@ -199,7 +202,7 @@ func isValidTTL(v string) (uint32, uint32, bool) {
// reject invalid range
return 0, 0, false
}
- return uint32(min), uint32(max), true
+ return uint32(min), uint32(max), true // #nosec G115 -- min/max parsed with 32-bit limit
}
return 0, 0, false
}
diff --git a/vendor/github.com/coredns/coredns/plugin/template/README.md b/vendor/github.com/coredns/coredns/plugin/template/README.md
index 1bca90662..6ac5b9548 100644
--- a/vendor/github.com/coredns/coredns/plugin/template/README.md
+++ b/vendor/github.com/coredns/coredns/plugin/template/README.md
@@ -26,7 +26,8 @@ template CLASS TYPE [ZONE...] {
* **TYPE** the query type (A, PTR, ... can be ANY to match all types).
* **ZONE** the zone scope(s) for this template. Defaults to the server zones.
* `match` **REGEX** [Go regexp](https://golang.org/pkg/regexp/) that are matched against the incoming question name.
- Specifying no regex matches everything (default: `.*`). First matching regex wins.
+ Specifying no regex matches everything (default: `.*`). First matching regex wins. Regex patterns
+ must not exceed 10000 characters.
* `answer|additional|authority` **RR** A [RFC 1035](https://tools.ietf.org/html/rfc1035#section-5) style resource record fragment
built by a [Go template](https://golang.org/pkg/text/template/) that contains the reply. Specifying no answer will result
in a response with an empty answer section.
diff --git a/vendor/github.com/coredns/coredns/plugin/template/setup.go b/vendor/github.com/coredns/coredns/plugin/template/setup.go
index cb2c706f1..8dda86def 100644
--- a/vendor/github.com/coredns/coredns/plugin/template/setup.go
+++ b/vendor/github.com/coredns/coredns/plugin/template/setup.go
@@ -13,6 +13,10 @@ import (
"github.com/miekg/dns"
)
+// maxRegexpLen is a hard limit on the length of a regex pattern to prevent
+// OOM during regex compilation with malicious input.
+const maxRegexpLen = 10000
+
func init() { plugin.Register("template", setupTemplate) }
func setupTemplate(c *caddy.Controller) error {
@@ -67,6 +71,9 @@ func templateParse(c *caddy.Controller) (handler Handler, err error) {
return handler, c.ArgErr()
}
for _, regex := range args {
+ if len(regex) > maxRegexpLen {
+ return handler, c.Errf("regex pattern too long: %d > %d", len(regex), maxRegexpLen)
+ }
r, err := regexp.Compile(regex)
if err != nil {
return handler, c.Errf("could not parse regex: %s, %v", regex, err)
diff --git a/vendor/github.com/coredns/coredns/plugin/test/file.go b/vendor/github.com/coredns/coredns/plugin/test/file.go
index 93fce6c08..b34d4d4d5 100644
--- a/vendor/github.com/coredns/coredns/plugin/test/file.go
+++ b/vendor/github.com/coredns/coredns/plugin/test/file.go
@@ -12,7 +12,10 @@ func TempFile(dir, content string) (string, func(), error) {
if err != nil {
return "", nil, err
}
- if err := os.WriteFile(f.Name(), []byte(content), 0644); err != nil {
+ if err := f.Close(); err != nil {
+ return "", nil, err
+ }
+ if err := os.WriteFile(f.Name(), []byte(content), 0600); err != nil {
return "", nil, err
}
rmFunc := func() { os.Remove(f.Name()) }
@@ -43,7 +46,7 @@ xGbtCkhVk2VQ+BiCWnjYXJ6ZMzabP7wiOFDP9Pvr2ik22PRItsW/TLfHFXM1jDmc
I1rs/VUGKzcJGVIWbHrgjP68CTStGAvKgbsTqw7aLXTSqtPw88N9XVSyRg==
-----END CERTIFICATE-----`
path := filepath.Join(tempDir, "ca.pem")
- if err := os.WriteFile(path, []byte(data), 0644); err != nil {
+ if err := os.WriteFile(path, []byte(data), 0600); err != nil {
return "", err
}
data = `-----BEGIN CERTIFICATE-----
@@ -64,10 +67,11 @@ zhDEPP4FhY+Sz+y1yWirphl7A1aZwhXVPcfWIGqpQ3jzNwUeocbH27kuLh+U4hQo
qeg10RdFnw==
-----END CERTIFICATE-----`
path = filepath.Join(tempDir, "cert.pem")
- if err := os.WriteFile(path, []byte(data), 0644); err != nil {
+ if err := os.WriteFile(path, []byte(data), 0600); err != nil {
return "", err
}
+ //nolint:gosec // Test fixture private key.
data = `-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEAxPBrvAIWiIJp383ndpRF+OuZ74pHsVLTJ/lSv05H+gzcGhL2
y1i7kWXOvfmgvlPq3kZzZ7LvyZSz8KzTumyeNR0ofnlsOklJ0bvNb2Zc3J4vAh58
@@ -96,7 +100,7 @@ E/WObVJXDnBdViu0L9abE9iaTToBVri4cmlDlZagLuKVR+TFTCN/DSlVZTDkqkLI
8chzqtkH6b2b2R73hyRysWjsomys34ma3mEEPTX/aXeAF2MSZ/EWT9yL
-----END RSA PRIVATE KEY-----`
path = filepath.Join(tempDir, "key.pem")
- if err := os.WriteFile(path, []byte(data), 0644); err != nil {
+ if err := os.WriteFile(path, []byte(data), 0600); err != nil {
return "", err
}
diff --git a/vendor/github.com/coredns/coredns/plugin/test/helpers.go b/vendor/github.com/coredns/coredns/plugin/test/helpers.go
index a7094a7b6..420f3b876 100644
--- a/vendor/github.com/coredns/coredns/plugin/test/helpers.go
+++ b/vendor/github.com/coredns/coredns/plugin/test/helpers.go
@@ -117,7 +117,7 @@ func OPT(bufsize int, do bool) *dns.OPT {
o.Hdr.Name = "."
o.Hdr.Rrtype = dns.TypeOPT
o.SetVersion(0)
- o.SetUDPSize(uint16(bufsize))
+ o.SetUDPSize(uint16(bufsize)) // #nosec G115 -- buffer size fits in uint16
if do {
o.SetDo()
}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig
deleted file mode 100644
index faef0c91e..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig
+++ /dev/null
@@ -1,21 +0,0 @@
-root = true
-
-[*]
-charset = utf-8
-end_of_line = lf
-indent_size = 4
-indent_style = space
-insert_final_newline = true
-trim_trailing_whitespace = true
-
-[*.go]
-indent_style = tab
-
-[{Makefile,*.mk}]
-indent_style = tab
-
-[*.nix]
-indent_size = 2
-
-[.golangci.yaml]
-indent_size = 2
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore
deleted file mode 100644
index 470e7ca2b..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-/.devenv/
-/.direnv/
-/.pre-commit-config.yaml
-/bin/
-/build/
-/var/
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
deleted file mode 100644
index bda962566..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-version: "2"
-
-run:
- timeout: 10m
-
-linters:
- enable:
- - govet
- - ineffassign
- # - misspell
- - nolintlint
- # - revive
-
- disable:
- - errcheck
- - staticcheck
- - unused
-
- settings:
- misspell:
- locale: US
- nolintlint:
- allow-unused: false # report any unused nolint directives
- require-specific: false # don't require nolint directives to be specific about which linter is being skipped
-
-formatters:
- enable:
- - gci
- - gofmt
- - gofumpt
- - goimports
- # - golines
-
- settings:
- gci:
- sections:
- - standard
- - default
- - localmodule
- gofmt:
- simplify: true
- rewrite-rules:
- - pattern: interface{}
- replacement: any
-
- exclusions:
- paths:
- - internal/
diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
deleted file mode 100644
index afd44e5f5..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
+++ /dev/null
@@ -1,104 +0,0 @@
-> [!WARNING]
-> As of v2 of this library, change log can be found in GitHub releases.
-
-## 1.5.1
-
-* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
-* Fix map of slices not decoding properly in certain cases. [GH-266]
-
-## 1.5.0
-
-* New option `IgnoreUntaggedFields` to ignore decoding to any fields
- without `mapstructure` (or the configured tag name) set [GH-277]
-* New option `ErrorUnset` which makes it an error if any fields
- in a target struct are not set by the decoding process. [GH-225]
-* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
-* Decoding to slice from array no longer crashes [GH-265]
-* Decode nested struct pointers to map [GH-271]
-* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
-* Fix issue where fields with `,omitempty` would sometimes decode
- into a map with an empty string key [GH-281]
-
-## 1.4.3
-
-* Fix cases where `json.Number` didn't decode properly [GH-261]
-
-## 1.4.2
-
-* Custom name matchers to support any sort of casing, formatting, etc. for
- field names. [GH-250]
-* Fix possible panic in ComposeDecodeHookFunc [GH-251]
-
-## 1.4.1
-
-* Fix regression where `*time.Time` value would be set to empty and not be sent
- to decode hooks properly [GH-232]
-
-## 1.4.0
-
-* A new decode hook type `DecodeHookFuncValue` has been added that has
- access to the full values. [GH-183]
-* Squash is now supported with embedded fields that are struct pointers [GH-205]
-* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
-
-## 1.3.3
-
-* Decoding maps from maps creates a settable value for decode hooks [GH-203]
-
-## 1.3.2
-
-* Decode into interface type with a struct value is supported [GH-187]
-
-## 1.3.1
-
-* Squash should only squash embedded structs. [GH-194]
-
-## 1.3.0
-
-* Added `",omitempty"` support. This will ignore zero values in the source
- structure when encoding. [GH-145]
-
-## 1.2.3
-
-* Fix duplicate entries in Keys list with pointer values. [GH-185]
-
-## 1.2.2
-
-* Do not add unsettable (unexported) values to the unused metadata key
- or "remain" value. [GH-150]
-
-## 1.2.1
-
-* Go modules checksum mismatch fix
-
-## 1.2.0
-
-* Added support to capture unused values in a field using the `",remain"` value
- in the mapstructure tag. There is an example to showcase usage.
-* Added `DecoderConfig` option to always squash embedded structs
-* `json.Number` can decode into `uint` types
-* Empty slices are preserved and not replaced with nil slices
-* Fix panic that can occur in when decoding a map into a nil slice of structs
-* Improved package documentation for godoc
-
-## 1.1.2
-
-* Fix error when decode hook decodes interface implementation into interface
- type. [GH-140]
-
-## 1.1.1
-
-* Fix panic that can happen in `decodePtr`
-
-## 1.1.0
-
-* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
-* Support struct to struct decoding [GH-137]
-* If source map value is nil, then destination map value is nil (instead of empty)
-* If source slice value is nil, then destination slice value is nil (instead of empty)
-* If source pointer is nil, then destination pointer is set to nil (instead of
- allocated zero value of type)
-
-## 1.0.0
-
-* Initial tagged stable release.
diff --git a/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE
deleted file mode 100644
index f9c841a51..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md
deleted file mode 100644
index 45db71975..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# mapstructure
-
-[](https://github.com/go-viper/mapstructure/actions/workflows/ci.yaml)
-[](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
-
-[](https://deps.dev/go/github.com%252Fgo-viper%252Fmapstructure%252Fv2)
-
-mapstructure is a Go library for decoding generic map values to structures
-and vice versa, while providing helpful error handling.
-
-This library is most useful when decoding values from some data stream (JSON,
-Gob, etc.) where you don't _quite_ know the structure of the underlying data
-until you read a part of it. You can therefore read a `map[string]interface{}`
-and use this library to decode it into the proper underlying native Go
-structure.
-
-## Installation
-
-```shell
-go get github.com/go-viper/mapstructure/v2
-```
-
-## Migrating from `github.com/mitchellh/mapstructure`
-
-[@mitchehllh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This is a repository achieved the "blessed fork" status.
-
-You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
-The API is the same, so you don't need to change anything else.
-
-Here is a script that can help you with the migration:
-
-```shell
-sed -i 's|github.com/mitchellh/mapstructure|github.com/go-viper/mapstructure/v2|g' $(find . -type f -name '*.go')
-```
-
-If you need more time to migrate your code, that is absolutely fine.
-
-Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:
-
-```shell
-replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
-```
-
-## Usage & Example
-
-For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
-
-The `Decode` function has examples associated with it there.
-
-## But Why?!
-
-Go offers fantastic standard libraries for decoding formats such as JSON.
-The standard method is to have a struct pre-created, and populate that struct
-from the bytes of the encoded format. This is great, but the problem is if
-you have configuration or an encoding that changes slightly depending on
-specific fields. For example, consider this JSON:
-
-```json
-{
- "type": "person",
- "name": "Mitchell"
-}
-```
-
-Perhaps we can't populate a specific structure without first reading
-the "type" field from the JSON. We could always do two passes over the
-decoding of the JSON (reading the "type" first, and the rest later).
-However, it is much simpler to just decode this into a `map[string]interface{}`
-structure, read the "type" key, then use something like this library
-to decode it into the proper structure.
-
-## Credits
-
-Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
-This is a maintained fork of the original library.
-
-Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
-
-## License
-
-The project is licensed under the [MIT License](LICENSE).
diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
deleted file mode 100644
index a852a0a04..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
+++ /dev/null
@@ -1,714 +0,0 @@
-package mapstructure
-
-import (
- "encoding"
- "errors"
- "fmt"
- "net"
- "net/netip"
- "net/url"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// typedDecodeHook takes a raw DecodeHookFunc (an any) and turns
-// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
-func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
- // Create variables here so we can reference them with the reflect pkg
- var f1 DecodeHookFuncType
- var f2 DecodeHookFuncKind
- var f3 DecodeHookFuncValue
-
- // Fill in the variables into this interface and the rest is done
- // automatically using the reflect package.
- potential := []any{f1, f2, f3}
-
- v := reflect.ValueOf(h)
- vt := v.Type()
- for _, raw := range potential {
- pt := reflect.ValueOf(raw).Type()
- if vt.ConvertibleTo(pt) {
- return v.Convert(pt).Interface()
- }
- }
-
- return nil
-}
-
-// cachedDecodeHook takes a raw DecodeHookFunc (an any) and turns
-// it into a closure to be used directly
-// if the type fails to convert we return a closure always erroring to keep the previous behaviour
-func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (any, error) {
- switch f := typedDecodeHook(raw).(type) {
- case DecodeHookFuncType:
- return func(from reflect.Value, to reflect.Value) (any, error) {
- return f(from.Type(), to.Type(), from.Interface())
- }
- case DecodeHookFuncKind:
- return func(from reflect.Value, to reflect.Value) (any, error) {
- return f(from.Kind(), to.Kind(), from.Interface())
- }
- case DecodeHookFuncValue:
- return func(from reflect.Value, to reflect.Value) (any, error) {
- return f(from, to)
- }
- default:
- return func(from reflect.Value, to reflect.Value) (any, error) {
- return nil, errors.New("invalid decode hook signature")
- }
- }
-}
-
-// DecodeHookExec executes the given decode hook. This should be used
-// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
-// that took reflect.Kind instead of reflect.Type.
-func DecodeHookExec(
- raw DecodeHookFunc,
- from reflect.Value, to reflect.Value,
-) (any, error) {
- switch f := typedDecodeHook(raw).(type) {
- case DecodeHookFuncType:
- return f(from.Type(), to.Type(), from.Interface())
- case DecodeHookFuncKind:
- return f(from.Kind(), to.Kind(), from.Interface())
- case DecodeHookFuncValue:
- return f(from, to)
- default:
- return nil, errors.New("invalid decode hook signature")
- }
-}
-
-// ComposeDecodeHookFunc creates a single DecodeHookFunc that
-// automatically composes multiple DecodeHookFuncs.
-//
-// The composed funcs are called in order, with the result of the
-// previous transformation.
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
- cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(fs))
- for _, f := range fs {
- cached = append(cached, cachedDecodeHook(f))
- }
- return func(f reflect.Value, t reflect.Value) (any, error) {
- var err error
- data := f.Interface()
-
- newFrom := f
- for _, c := range cached {
- data, err = c(newFrom, t)
- if err != nil {
- return nil, err
- }
- if v, ok := data.(reflect.Value); ok {
- newFrom = v
- } else {
- newFrom = reflect.ValueOf(data)
- }
- }
-
- return data, nil
- }
-}
-
-// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
-// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
-func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
- cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(ff))
- for _, f := range ff {
- cached = append(cached, cachedDecodeHook(f))
- }
- return func(a, b reflect.Value) (any, error) {
- var allErrs string
- var out any
- var err error
-
- for _, c := range cached {
- out, err = c(a, b)
- if err != nil {
- allErrs += err.Error() + "\n"
- continue
- }
-
- return out, nil
- }
-
- return nil, errors.New(allErrs)
- }
-}
-
-// StringToSliceHookFunc returns a DecodeHookFunc that converts
-// string to []string by splitting on the given sep.
-func StringToSliceHookFunc(sep string) DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.SliceOf(f) {
- return data, nil
- }
-
- raw := data.(string)
- if raw == "" {
- return []string{}, nil
- }
-
- return strings.Split(raw, sep), nil
- }
-}
-
-// StringToWeakSliceHookFunc brings back the old (pre-v2) behavior of [StringToSliceHookFunc].
-//
-// As of mapstructure v2.0.0 [StringToSliceHookFunc] checks if the return type is a string slice.
-// This function removes that check.
-func StringToWeakSliceHookFunc(sep string) DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Slice {
- return data, nil
- }
-
- raw := data.(string)
- if raw == "" {
- return []string{}, nil
- }
-
- return strings.Split(raw, sep), nil
- }
-}
-
-// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
-// strings to time.Duration.
-func StringToTimeDurationHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(time.Duration(5)) {
- return data, nil
- }
-
- // Convert it by parsing
- d, err := time.ParseDuration(data.(string))
-
- return d, wrapTimeParseDurationError(err)
- }
-}
-
-// StringToTimeLocationHookFunc returns a DecodeHookFunc that converts
-// strings to *time.Location.
-func StringToTimeLocationHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(time.Local) {
- return data, nil
- }
- d, err := time.LoadLocation(data.(string))
-
- return d, wrapTimeParseLocationError(err)
- }
-}
-
-// StringToURLHookFunc returns a DecodeHookFunc that converts
-// strings to *url.URL.
-func StringToURLHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(&url.URL{}) {
- return data, nil
- }
-
- // Convert it by parsing
- u, err := url.Parse(data.(string))
-
- return u, wrapUrlError(err)
- }
-}
-
-// StringToIPHookFunc returns a DecodeHookFunc that converts
-// strings to net.IP
-func StringToIPHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(net.IP{}) {
- return data, nil
- }
-
- // Convert it by parsing
- ip := net.ParseIP(data.(string))
- if ip == nil {
- return net.IP{}, fmt.Errorf("failed parsing ip")
- }
-
- return ip, nil
- }
-}
-
-// StringToIPNetHookFunc returns a DecodeHookFunc that converts
-// strings to net.IPNet
-func StringToIPNetHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(net.IPNet{}) {
- return data, nil
- }
-
- // Convert it by parsing
- _, net, err := net.ParseCIDR(data.(string))
- return net, wrapNetParseError(err)
- }
-}
-
-// StringToTimeHookFunc returns a DecodeHookFunc that converts
-// strings to time.Time.
-func StringToTimeHookFunc(layout string) DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(time.Time{}) {
- return data, nil
- }
-
- // Convert it by parsing
- ti, err := time.Parse(layout, data.(string))
-
- return ti, wrapTimeParseError(err)
- }
-}
-
-// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
-// the decoder.
-//
-// Note that this is significantly different from the WeaklyTypedInput option
-// of the DecoderConfig.
-func WeaklyTypedHook(
- f reflect.Kind,
- t reflect.Kind,
- data any,
-) (any, error) {
- dataVal := reflect.ValueOf(data)
- switch t {
- case reflect.String:
- switch f {
- case reflect.Bool:
- if dataVal.Bool() {
- return "1", nil
- }
- return "0", nil
- case reflect.Float32:
- return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
- case reflect.Int:
- return strconv.FormatInt(dataVal.Int(), 10), nil
- case reflect.Slice:
- dataType := dataVal.Type()
- elemKind := dataType.Elem().Kind()
- if elemKind == reflect.Uint8 {
- return string(dataVal.Interface().([]uint8)), nil
- }
- case reflect.Uint:
- return strconv.FormatUint(dataVal.Uint(), 10), nil
- }
- }
-
- return data, nil
-}
-
-func RecursiveStructToMapHookFunc() DecodeHookFunc {
- return func(f reflect.Value, t reflect.Value) (any, error) {
- if f.Kind() != reflect.Struct {
- return f.Interface(), nil
- }
-
- var i any = struct{}{}
- if t.Type() != reflect.TypeOf(&i).Elem() {
- return f.Interface(), nil
- }
-
- m := make(map[string]any)
- t.Set(reflect.ValueOf(m))
-
- return f.Interface(), nil
- }
-}
-
-// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
-// strings to the UnmarshalText function, when the target type
-// implements the encoding.TextUnmarshaler interface
-func TextUnmarshallerHookFunc() DecodeHookFuncType {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- result := reflect.New(t).Interface()
- unmarshaller, ok := result.(encoding.TextUnmarshaler)
- if !ok {
- return data, nil
- }
- str, ok := data.(string)
- if !ok {
- str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
- }
- if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
- return nil, err
- }
- return result, nil
- }
-}
-
-// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
-// strings to netip.Addr.
-func StringToNetIPAddrHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(netip.Addr{}) {
- return data, nil
- }
-
- // Convert it by parsing
- addr, err := netip.ParseAddr(data.(string))
-
- return addr, wrapNetIPParseAddrError(err)
- }
-}
-
-// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
-// strings to netip.AddrPort.
-func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(netip.AddrPort{}) {
- return data, nil
- }
-
- // Convert it by parsing
- addrPort, err := netip.ParseAddrPort(data.(string))
-
- return addrPort, wrapNetIPParseAddrPortError(err)
- }
-}
-
-// StringToNetIPPrefixHookFunc returns a DecodeHookFunc that converts
-// strings to netip.Prefix.
-func StringToNetIPPrefixHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data any,
- ) (any, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(netip.Prefix{}) {
- return data, nil
- }
-
- // Convert it by parsing
- prefix, err := netip.ParsePrefix(data.(string))
-
- return prefix, wrapNetIPParsePrefixError(err)
- }
-}
-
-// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
-// strings to basic types.
-// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
-func StringToBasicTypeHookFunc() DecodeHookFunc {
- return ComposeDecodeHookFunc(
- StringToInt8HookFunc(),
- StringToUint8HookFunc(),
- StringToInt16HookFunc(),
- StringToUint16HookFunc(),
- StringToInt32HookFunc(),
- StringToUint32HookFunc(),
- StringToInt64HookFunc(),
- StringToUint64HookFunc(),
- StringToIntHookFunc(),
- StringToUintHookFunc(),
- StringToFloat32HookFunc(),
- StringToFloat64HookFunc(),
- StringToBoolHookFunc(),
- // byte and rune are aliases for uint8 and int32 respectively
- // StringToByteHookFunc(),
- // StringToRuneHookFunc(),
- StringToComplex64HookFunc(),
- StringToComplex128HookFunc(),
- )
-}
-
-// StringToInt8HookFunc returns a DecodeHookFunc that converts
-// strings to int8.
-func StringToInt8HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
- return data, nil
- }
-
- // Convert it by parsing
- i64, err := strconv.ParseInt(data.(string), 0, 8)
- return int8(i64), wrapStrconvNumError(err)
- }
-}
-
-// StringToUint8HookFunc returns a DecodeHookFunc that converts
-// strings to uint8.
-func StringToUint8HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
- return data, nil
- }
-
- // Convert it by parsing
- u64, err := strconv.ParseUint(data.(string), 0, 8)
- return uint8(u64), wrapStrconvNumError(err)
- }
-}
-
-// StringToInt16HookFunc returns a DecodeHookFunc that converts
-// strings to int16.
-func StringToInt16HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
- return data, nil
- }
-
- // Convert it by parsing
- i64, err := strconv.ParseInt(data.(string), 0, 16)
- return int16(i64), wrapStrconvNumError(err)
- }
-}
-
-// StringToUint16HookFunc returns a DecodeHookFunc that converts
-// strings to uint16.
-func StringToUint16HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
- return data, nil
- }
-
- // Convert it by parsing
- u64, err := strconv.ParseUint(data.(string), 0, 16)
- return uint16(u64), wrapStrconvNumError(err)
- }
-}
-
-// StringToInt32HookFunc returns a DecodeHookFunc that converts
-// strings to int32.
-func StringToInt32HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
- return data, nil
- }
-
- // Convert it by parsing
- i64, err := strconv.ParseInt(data.(string), 0, 32)
- return int32(i64), wrapStrconvNumError(err)
- }
-}
-
-// StringToUint32HookFunc returns a DecodeHookFunc that converts
-// strings to uint32.
-func StringToUint32HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
- return data, nil
- }
-
- // Convert it by parsing
- u64, err := strconv.ParseUint(data.(string), 0, 32)
- return uint32(u64), wrapStrconvNumError(err)
- }
-}
-
-// StringToInt64HookFunc returns a DecodeHookFunc that converts
-// strings to int64.
-func StringToInt64HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
- return data, nil
- }
-
- // Convert it by parsing
- i64, err := strconv.ParseInt(data.(string), 0, 64)
- return int64(i64), wrapStrconvNumError(err)
- }
-}
-
-// StringToUint64HookFunc returns a DecodeHookFunc that converts
-// strings to uint64.
-func StringToUint64HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
- return data, nil
- }
-
- // Convert it by parsing
- u64, err := strconv.ParseUint(data.(string), 0, 64)
- return uint64(u64), wrapStrconvNumError(err)
- }
-}
-
-// StringToIntHookFunc returns a DecodeHookFunc that converts
-// strings to int.
-func StringToIntHookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Int {
- return data, nil
- }
-
- // Convert it by parsing
- i64, err := strconv.ParseInt(data.(string), 0, 0)
- return int(i64), wrapStrconvNumError(err)
- }
-}
-
-// StringToUintHookFunc returns a DecodeHookFunc that converts
-// strings to uint.
-func StringToUintHookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
- return data, nil
- }
-
- // Convert it by parsing
- u64, err := strconv.ParseUint(data.(string), 0, 0)
- return uint(u64), wrapStrconvNumError(err)
- }
-}
-
-// StringToFloat32HookFunc returns a DecodeHookFunc that converts
-// strings to float32.
-func StringToFloat32HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
- return data, nil
- }
-
- // Convert it by parsing
- f64, err := strconv.ParseFloat(data.(string), 32)
- return float32(f64), wrapStrconvNumError(err)
- }
-}
-
-// StringToFloat64HookFunc returns a DecodeHookFunc that converts
-// strings to float64.
-func StringToFloat64HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
- return data, nil
- }
-
- // Convert it by parsing
- f64, err := strconv.ParseFloat(data.(string), 64)
- return f64, wrapStrconvNumError(err)
- }
-}
-
-// StringToBoolHookFunc returns a DecodeHookFunc that converts
-// strings to bool.
-func StringToBoolHookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
- return data, nil
- }
-
- // Convert it by parsing
- b, err := strconv.ParseBool(data.(string))
- return b, wrapStrconvNumError(err)
- }
-}
-
-// StringToByteHookFunc returns a DecodeHookFunc that converts
-// strings to byte.
-func StringToByteHookFunc() DecodeHookFunc {
- return StringToUint8HookFunc()
-}
-
-// StringToRuneHookFunc returns a DecodeHookFunc that converts
-// strings to rune.
-func StringToRuneHookFunc() DecodeHookFunc {
- return StringToInt32HookFunc()
-}
-
-// StringToComplex64HookFunc returns a DecodeHookFunc that converts
-// strings to complex64.
-func StringToComplex64HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
- return data, nil
- }
-
- // Convert it by parsing
- c128, err := strconv.ParseComplex(data.(string), 64)
- return complex64(c128), wrapStrconvNumError(err)
- }
-}
-
-// StringToComplex128HookFunc returns a DecodeHookFunc that converts
-// strings to complex128.
-func StringToComplex128HookFunc() DecodeHookFunc {
- return func(f reflect.Type, t reflect.Type, data any) (any, error) {
- if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
- return data, nil
- }
-
- // Convert it by parsing
- c128, err := strconv.ParseComplex(data.(string), 128)
- return c128, wrapStrconvNumError(err)
- }
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/errors.go b/vendor/github.com/go-viper/mapstructure/v2/errors.go
deleted file mode 100644
index 07d31c22a..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/errors.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package mapstructure
-
-import (
- "errors"
- "fmt"
- "net"
- "net/url"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// Error interface is implemented by all errors emitted by mapstructure.
-//
-// Use [errors.As] to check if an error implements this interface.
-type Error interface {
- error
-
- mapstructure()
-}
-
-// DecodeError is a generic error type that holds information about
-// a decoding error together with the name of the field that caused the error.
-type DecodeError struct {
- name string
- err error
-}
-
-func newDecodeError(name string, err error) *DecodeError {
- return &DecodeError{
- name: name,
- err: err,
- }
-}
-
-func (e *DecodeError) Name() string {
- return e.name
-}
-
-func (e *DecodeError) Unwrap() error {
- return e.err
-}
-
-func (e *DecodeError) Error() string {
- return fmt.Sprintf("'%s' %s", e.name, e.err)
-}
-
-func (*DecodeError) mapstructure() {}
-
-// ParseError is an error type that indicates a value could not be parsed
-// into the expected type.
-type ParseError struct {
- Expected reflect.Value
- Value any
- Err error
-}
-
-func (e *ParseError) Error() string {
- return fmt.Sprintf("cannot parse value as '%s': %s", e.Expected.Type(), e.Err)
-}
-
-func (*ParseError) mapstructure() {}
-
-// UnconvertibleTypeError is an error type that indicates a value could not be
-// converted to the expected type.
-type UnconvertibleTypeError struct {
- Expected reflect.Value
- Value any
-}
-
-func (e *UnconvertibleTypeError) Error() string {
- return fmt.Sprintf(
- "expected type '%s', got unconvertible type '%s'",
- e.Expected.Type(),
- reflect.TypeOf(e.Value),
- )
-}
-
-func (*UnconvertibleTypeError) mapstructure() {}
-
-func wrapStrconvNumError(err error) error {
- if err == nil {
- return nil
- }
-
- if err, ok := err.(*strconv.NumError); ok {
- return &strconvNumError{Err: err}
- }
-
- return err
-}
-
-type strconvNumError struct {
- Err *strconv.NumError
-}
-
-func (e *strconvNumError) Error() string {
- return "strconv." + e.Err.Func + ": " + e.Err.Err.Error()
-}
-
-func (e *strconvNumError) Unwrap() error { return e.Err }
-
-func wrapUrlError(err error) error {
- if err == nil {
- return nil
- }
-
- if err, ok := err.(*url.Error); ok {
- return &urlError{Err: err}
- }
-
- return err
-}
-
-type urlError struct {
- Err *url.Error
-}
-
-func (e *urlError) Error() string {
- return fmt.Sprintf("%s", e.Err.Err)
-}
-
-func (e *urlError) Unwrap() error { return e.Err }
-
-func wrapNetParseError(err error) error {
- if err == nil {
- return nil
- }
-
- if err, ok := err.(*net.ParseError); ok {
- return &netParseError{Err: err}
- }
-
- return err
-}
-
-type netParseError struct {
- Err *net.ParseError
-}
-
-func (e *netParseError) Error() string {
- return "invalid " + e.Err.Type
-}
-
-func (e *netParseError) Unwrap() error { return e.Err }
-
-func wrapTimeParseError(err error) error {
- if err == nil {
- return nil
- }
-
- if err, ok := err.(*time.ParseError); ok {
- return &timeParseError{Err: err}
- }
-
- return err
-}
-
-type timeParseError struct {
- Err *time.ParseError
-}
-
-func (e *timeParseError) Error() string {
- if e.Err.Message == "" {
- return fmt.Sprintf("parsing time as %q: cannot parse as %q", e.Err.Layout, e.Err.LayoutElem)
- }
-
- return "parsing time " + e.Err.Message
-}
-
-func (e *timeParseError) Unwrap() error { return e.Err }
-
-func wrapNetIPParseAddrError(err error) error {
- if err == nil {
- return nil
- }
-
- if errMsg := err.Error(); strings.HasPrefix(errMsg, "ParseAddr") {
- errPieces := strings.Split(errMsg, ": ")
-
- return fmt.Errorf("ParseAddr: %s", errPieces[len(errPieces)-1])
- }
-
- return err
-}
-
-func wrapNetIPParseAddrPortError(err error) error {
- if err == nil {
- return nil
- }
-
- errMsg := err.Error()
- if strings.HasPrefix(errMsg, "invalid port ") {
- return errors.New("invalid port")
- } else if strings.HasPrefix(errMsg, "invalid ip:port ") {
- return errors.New("invalid ip:port")
- }
-
- return err
-}
-
-func wrapNetIPParsePrefixError(err error) error {
- if err == nil {
- return nil
- }
-
- if errMsg := err.Error(); strings.HasPrefix(errMsg, "netip.ParsePrefix") {
- errPieces := strings.Split(errMsg, ": ")
-
- return fmt.Errorf("netip.ParsePrefix: %s", errPieces[len(errPieces)-1])
- }
-
- return err
-}
-
-func wrapTimeParseDurationError(err error) error {
- if err == nil {
- return nil
- }
-
- errMsg := err.Error()
- if strings.HasPrefix(errMsg, "time: unknown unit ") {
- return errors.New("time: unknown unit")
- } else if strings.HasPrefix(errMsg, "time: ") {
- idx := strings.LastIndex(errMsg, " ")
-
- return errors.New(errMsg[:idx])
- }
-
- return err
-}
-
-func wrapTimeParseLocationError(err error) error {
- if err == nil {
- return nil
- }
- errMsg := err.Error()
- if strings.Contains(errMsg, "unknown time zone") || strings.HasPrefix(errMsg, "time: unknown format") {
- return fmt.Errorf("invalid time zone format: %w", err)
- }
-
- return err
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock
deleted file mode 100644
index 5e67bdd6b..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/flake.lock
+++ /dev/null
@@ -1,294 +0,0 @@
-{
- "nodes": {
- "cachix": {
- "inputs": {
- "devenv": [
- "devenv"
- ],
- "flake-compat": [
- "devenv"
- ],
- "git-hooks": [
- "devenv"
- ],
- "nixpkgs": "nixpkgs"
- },
- "locked": {
- "lastModified": 1742042642,
- "narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=",
- "owner": "cachix",
- "repo": "cachix",
- "rev": "a624d3eaf4b1d225f918de8543ed739f2f574203",
- "type": "github"
- },
- "original": {
- "owner": "cachix",
- "ref": "latest",
- "repo": "cachix",
- "type": "github"
- }
- },
- "devenv": {
- "inputs": {
- "cachix": "cachix",
- "flake-compat": "flake-compat",
- "git-hooks": "git-hooks",
- "nix": "nix",
- "nixpkgs": "nixpkgs_3"
- },
- "locked": {
- "lastModified": 1744876578,
- "narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=",
- "owner": "cachix",
- "repo": "devenv",
- "rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1",
- "type": "github"
- },
- "original": {
- "owner": "cachix",
- "repo": "devenv",
- "type": "github"
- }
- },
- "flake-compat": {
- "flake": false,
- "locked": {
- "lastModified": 1733328505,
- "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
- "owner": "edolstra",
- "repo": "flake-compat",
- "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
- "type": "github"
- },
- "original": {
- "owner": "edolstra",
- "repo": "flake-compat",
- "type": "github"
- }
- },
- "flake-parts": {
- "inputs": {
- "nixpkgs-lib": [
- "devenv",
- "nix",
- "nixpkgs"
- ]
- },
- "locked": {
- "lastModified": 1712014858,
- "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
- "owner": "hercules-ci",
- "repo": "flake-parts",
- "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
- "type": "github"
- },
- "original": {
- "owner": "hercules-ci",
- "repo": "flake-parts",
- "type": "github"
- }
- },
- "flake-parts_2": {
- "inputs": {
- "nixpkgs-lib": "nixpkgs-lib"
- },
- "locked": {
- "lastModified": 1743550720,
- "narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=",
- "owner": "hercules-ci",
- "repo": "flake-parts",
- "rev": "c621e8422220273271f52058f618c94e405bb0f5",
- "type": "github"
- },
- "original": {
- "owner": "hercules-ci",
- "repo": "flake-parts",
- "type": "github"
- }
- },
- "git-hooks": {
- "inputs": {
- "flake-compat": [
- "devenv"
- ],
- "gitignore": "gitignore",
- "nixpkgs": [
- "devenv",
- "nixpkgs"
- ]
- },
- "locked": {
- "lastModified": 1742649964,
- "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
- "owner": "cachix",
- "repo": "git-hooks.nix",
- "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
- "type": "github"
- },
- "original": {
- "owner": "cachix",
- "repo": "git-hooks.nix",
- "type": "github"
- }
- },
- "gitignore": {
- "inputs": {
- "nixpkgs": [
- "devenv",
- "git-hooks",
- "nixpkgs"
- ]
- },
- "locked": {
- "lastModified": 1709087332,
- "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
- "owner": "hercules-ci",
- "repo": "gitignore.nix",
- "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
- "type": "github"
- },
- "original": {
- "owner": "hercules-ci",
- "repo": "gitignore.nix",
- "type": "github"
- }
- },
- "libgit2": {
- "flake": false,
- "locked": {
- "lastModified": 1697646580,
- "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
- "owner": "libgit2",
- "repo": "libgit2",
- "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
- "type": "github"
- },
- "original": {
- "owner": "libgit2",
- "repo": "libgit2",
- "type": "github"
- }
- },
- "nix": {
- "inputs": {
- "flake-compat": [
- "devenv"
- ],
- "flake-parts": "flake-parts",
- "libgit2": "libgit2",
- "nixpkgs": "nixpkgs_2",
- "nixpkgs-23-11": [
- "devenv"
- ],
- "nixpkgs-regression": [
- "devenv"
- ],
- "pre-commit-hooks": [
- "devenv"
- ]
- },
- "locked": {
- "lastModified": 1741798497,
- "narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=",
- "owner": "domenkozar",
- "repo": "nix",
- "rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd",
- "type": "github"
- },
- "original": {
- "owner": "domenkozar",
- "ref": "devenv-2.24",
- "repo": "nix",
- "type": "github"
- }
- },
- "nixpkgs": {
- "locked": {
- "lastModified": 1733212471,
- "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
- "owner": "NixOS",
- "repo": "nixpkgs",
- "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
- "type": "github"
- },
- "original": {
- "owner": "NixOS",
- "ref": "nixos-unstable",
- "repo": "nixpkgs",
- "type": "github"
- }
- },
- "nixpkgs-lib": {
- "locked": {
- "lastModified": 1743296961,
- "narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=",
- "owner": "nix-community",
- "repo": "nixpkgs.lib",
- "rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa",
- "type": "github"
- },
- "original": {
- "owner": "nix-community",
- "repo": "nixpkgs.lib",
- "type": "github"
- }
- },
- "nixpkgs_2": {
- "locked": {
- "lastModified": 1717432640,
- "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
- "owner": "NixOS",
- "repo": "nixpkgs",
- "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
- "type": "github"
- },
- "original": {
- "owner": "NixOS",
- "ref": "release-24.05",
- "repo": "nixpkgs",
- "type": "github"
- }
- },
- "nixpkgs_3": {
- "locked": {
- "lastModified": 1733477122,
- "narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=",
- "owner": "cachix",
- "repo": "devenv-nixpkgs",
- "rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857",
- "type": "github"
- },
- "original": {
- "owner": "cachix",
- "ref": "rolling",
- "repo": "devenv-nixpkgs",
- "type": "github"
- }
- },
- "nixpkgs_4": {
- "locked": {
- "lastModified": 1744536153,
- "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=",
- "owner": "NixOS",
- "repo": "nixpkgs",
- "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11",
- "type": "github"
- },
- "original": {
- "owner": "NixOS",
- "ref": "nixpkgs-unstable",
- "repo": "nixpkgs",
- "type": "github"
- }
- },
- "root": {
- "inputs": {
- "devenv": "devenv",
- "flake-parts": "flake-parts_2",
- "nixpkgs": "nixpkgs_4"
- }
- }
- },
- "root": "root",
- "version": 7
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix
deleted file mode 100644
index 3b116f426..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/flake.nix
+++ /dev/null
@@ -1,46 +0,0 @@
-{
- inputs = {
- nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
- flake-parts.url = "github:hercules-ci/flake-parts";
- devenv.url = "github:cachix/devenv";
- };
-
- outputs =
- inputs@{ flake-parts, ... }:
- flake-parts.lib.mkFlake { inherit inputs; } {
- imports = [
- inputs.devenv.flakeModule
- ];
-
- systems = [
- "x86_64-linux"
- "x86_64-darwin"
- "aarch64-darwin"
- ];
-
- perSystem =
- { pkgs, ... }:
- rec {
- devenv.shells = {
- default = {
- languages = {
- go.enable = true;
- };
-
- pre-commit.hooks = {
- nixpkgs-fmt.enable = true;
- };
-
- packages = with pkgs; [
- golangci-lint
- ];
-
- # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
- containers = pkgs.lib.mkForce { };
- };
-
- ci = devenv.shells.default;
- };
- };
- };
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
deleted file mode 100644
index d1c15e474..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package errors
-
-import "errors"
-
-func New(text string) error {
- return errors.New(text)
-}
-
-func As(err error, target interface{}) bool {
- return errors.As(err, target)
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
deleted file mode 100644
index d74e3a0b5..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build go1.20
-
-package errors
-
-import "errors"
-
-func Join(errs ...error) error {
- return errors.Join(errs...)
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
deleted file mode 100644
index 700b40229..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
+++ /dev/null
@@ -1,61 +0,0 @@
-//go:build !go1.20
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package errors
-
-// Join returns an error that wraps the given errors.
-// Any nil error values are discarded.
-// Join returns nil if every value in errs is nil.
-// The error formats as the concatenation of the strings obtained
-// by calling the Error method of each element of errs, with a newline
-// between each string.
-//
-// A non-nil error returned by Join implements the Unwrap() []error method.
-func Join(errs ...error) error {
- n := 0
- for _, err := range errs {
- if err != nil {
- n++
- }
- }
- if n == 0 {
- return nil
- }
- e := &joinError{
- errs: make([]error, 0, n),
- }
- for _, err := range errs {
- if err != nil {
- e.errs = append(e.errs, err)
- }
- }
- return e
-}
-
-type joinError struct {
- errs []error
-}
-
-func (e *joinError) Error() string {
- // Since Join returns nil if every value in errs is nil,
- // e.errs cannot be empty.
- if len(e.errs) == 1 {
- return e.errs[0].Error()
- }
-
- b := []byte(e.errs[0].Error())
- for _, err := range e.errs[1:] {
- b = append(b, '\n')
- b = append(b, err.Error()...)
- }
- // At this point, b has at least one byte '\n'.
- // return unsafe.String(&b[0], len(b))
- return string(b)
-}
-
-func (e *joinError) Unwrap() []error {
- return e.errs
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
deleted file mode 100644
index 7c35bce02..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
+++ /dev/null
@@ -1,1712 +0,0 @@
-// Package mapstructure exposes functionality to convert one arbitrary
-// Go type into another, typically to convert a map[string]any
-// into a native Go structure.
-//
-// The Go structure can be arbitrarily complex, containing slices,
-// other structs, etc. and the decoder will properly decode nested
-// maps and so on into the proper structures in the native Go struct.
-// See the examples to see what the decoder is capable of.
-//
-// The simplest function to start with is Decode.
-//
-// # Field Tags
-//
-// When decoding to a struct, mapstructure will use the field name by
-// default to perform the mapping. For example, if a struct has a field
-// "Username" then mapstructure will look for a key in the source value
-// of "username" (case insensitive).
-//
-// type User struct {
-// Username string
-// }
-//
-// You can change the behavior of mapstructure by using struct tags.
-// The default struct tag that mapstructure looks for is "mapstructure"
-// but you can customize it using DecoderConfig.
-//
-// # Renaming Fields
-//
-// To rename the key that mapstructure looks for, use the "mapstructure"
-// tag and set a value directly. For example, to change the "username" example
-// above to "user":
-//
-// type User struct {
-// Username string `mapstructure:"user"`
-// }
-//
-// # Embedded Structs and Squashing
-//
-// Embedded structs are treated as if they're another field with that name.
-// By default, the two structs below are equivalent when decoding with
-// mapstructure:
-//
-// type Person struct {
-// Name string
-// }
-//
-// type Friend struct {
-// Person
-// }
-//
-// type Friend struct {
-// Person Person
-// }
-//
-// This would require an input that looks like below:
-//
-// map[string]any{
-// "person": map[string]any{"name": "alice"},
-// }
-//
-// If your "person" value is NOT nested, then you can append ",squash" to
-// your tag value and mapstructure will treat it as if the embedded struct
-// were part of the struct directly. Example:
-//
-// type Friend struct {
-// Person `mapstructure:",squash"`
-// }
-//
-// Now the following input would be accepted:
-//
-// map[string]any{
-// "name": "alice",
-// }
-//
-// When decoding from a struct to a map, the squash tag squashes the struct
-// fields into a single map. Using the example structs from above:
-//
-// Friend{Person: Person{Name: "alice"}}
-//
-// Will be decoded into a map:
-//
-// map[string]any{
-// "name": "alice",
-// }
-//
-// DecoderConfig has a field that changes the behavior of mapstructure
-// to always squash embedded structs.
-//
-// # Remainder Values
-//
-// If there are any unmapped keys in the source value, mapstructure by
-// default will silently ignore them. You can error by setting ErrorUnused
-// in DecoderConfig. If you're using Metadata you can also maintain a slice
-// of the unused keys.
-//
-// You can also use the ",remain" suffix on your tag to collect all unused
-// values in a map. The field with this tag MUST be a map type and should
-// probably be a "map[string]any" or "map[any]any".
-// See example below:
-//
-// type Friend struct {
-// Name string
-// Other map[string]any `mapstructure:",remain"`
-// }
-//
-// Given the input below, Other would be populated with the other
-// values that weren't used (everything but "name"):
-//
-// map[string]any{
-// "name": "bob",
-// "address": "123 Maple St.",
-// }
-//
-// # Omit Empty Values
-//
-// When decoding from a struct to any other value, you may use the
-// ",omitempty" suffix on your tag to omit that value if it equates to
-// the zero value, or a zero-length element. The zero value of all types is
-// specified in the Go specification.
-//
-// For example, the zero type of a numeric type is zero ("0"). If the struct
-// field value is zero and a numeric type, the field is empty, and it won't
-// be encoded into the destination type. And likewise for the URLs field, if the
-// slice is nil or empty, it won't be encoded into the destination type.
-//
-// type Source struct {
-// Age int `mapstructure:",omitempty"`
-// URLs []string `mapstructure:",omitempty"`
-// }
-//
-// # Omit Zero Values
-//
-// When decoding from a struct to any other value, you may use the
-// ",omitzero" suffix on your tag to omit that value if it equates to the zero
-// value. The zero value of all types is specified in the Go specification.
-//
-// For example, the zero type of a numeric type is zero ("0"). If the struct
-// field value is zero and a numeric type, the field is empty, and it won't
-// be encoded into the destination type. And likewise for the URLs field, if the
-// slice is nil, it won't be encoded into the destination type.
-//
-// Note that if the field is a slice, and it is empty but not nil, it will
-// still be encoded into the destination type.
-//
-// type Source struct {
-// Age int `mapstructure:",omitzero"`
-// URLs []string `mapstructure:",omitzero"`
-// }
-//
-// # Unexported fields
-//
-// Since unexported (private) struct fields cannot be set outside the package
-// where they are defined, the decoder will simply skip them.
-//
-// For this output type definition:
-//
-// type Exported struct {
-// private string // this unexported field will be skipped
-// Public string
-// }
-//
-// Using this map as input:
-//
-// map[string]any{
-// "private": "I will be ignored",
-// "Public": "I made it through!",
-// }
-//
-// The following struct will be decoded:
-//
-// type Exported struct {
-// private: "" // field is left with an empty string (zero value)
-// Public: "I made it through!"
-// }
-//
-// # Other Configuration
-//
-// mapstructure is highly configurable. See the DecoderConfig struct
-// for other features and options that are supported.
-package mapstructure
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
-
- "github.com/go-viper/mapstructure/v2/internal/errors"
-)
-
-// DecodeHookFunc is the callback function that can be used for
-// data transformations. See "DecodeHook" in the DecoderConfig
-// struct.
-//
-// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
-// DecodeHookFuncValue.
-// Values are a superset of Types (Values can return types), and Types are a
-// superset of Kinds (Types can return Kinds) and are generally a richer thing
-// to use, but Kinds are simpler if you only need those.
-//
-// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
-// we started with Kinds and then realized Types were the better solution,
-// but have a promise to not break backwards compat so we now support
-// both.
-type DecodeHookFunc any
-
-// DecodeHookFuncType is a DecodeHookFunc which has complete information about
-// the source and target types.
-type DecodeHookFuncType func(reflect.Type, reflect.Type, any) (any, error)
-
-// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
-// source and target types.
-type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error)
-
-// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
-// values.
-type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error)
-
-// DecoderConfig is the configuration that is used to create a new decoder
-// and allows customization of various aspects of decoding.
-type DecoderConfig struct {
- // DecodeHook, if set, will be called before any decoding and any
- // type conversion (if WeaklyTypedInput is on). This lets you modify
- // the values before they're set down onto the resulting struct. The
- // DecodeHook is called for every map and value in the input. This means
- // that if a struct has embedded fields with squash tags the decode hook
- // is called only once with all of the input data, not once for each
- // embedded struct.
- //
- // If an error is returned, the entire decode will fail with that error.
- DecodeHook DecodeHookFunc
-
- // If ErrorUnused is true, then it is an error for there to exist
- // keys in the original map that were unused in the decoding process
- // (extra keys).
- ErrorUnused bool
-
- // If ErrorUnset is true, then it is an error for there to exist
- // fields in the result that were not set in the decoding process
- // (extra fields). This only applies to decoding to a struct. This
- // will affect all nested structs as well.
- ErrorUnset bool
-
- // AllowUnsetPointer, if set to true, will prevent fields with pointer types
- // from being reported as unset, even if ErrorUnset is true and the field was
- // not present in the input data. This allows pointer fields to be optional
- // without triggering an error when they are missing.
- AllowUnsetPointer bool
-
- // ZeroFields, if set to true, will zero fields before writing them.
- // For example, a map will be emptied before decoded values are put in
- // it. If this is false, a map will be merged.
- ZeroFields bool
-
- // If WeaklyTypedInput is true, the decoder will make the following
- // "weak" conversions:
- //
- // - bools to string (true = "1", false = "0")
- // - numbers to string (base 10)
- // - bools to int/uint (true = 1, false = 0)
- // - strings to int/uint (base implied by prefix)
- // - int to bool (true if value != 0)
- // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
- // FALSE, false, False. Anything else is an error)
- // - empty array = empty map and vice versa
- // - negative numbers to overflowed uint values (base 10)
- // - slice of maps to a merged map
- // - single values are converted to slices if required. Each
- // element is weakly decoded. For example: "4" can become []int{4}
- // if the target type is an int slice.
- //
- WeaklyTypedInput bool
-
- // Squash will squash embedded structs. A squash tag may also be
- // added to an individual struct field using a tag. For example:
- //
- // type Parent struct {
- // Child `mapstructure:",squash"`
- // }
- Squash bool
-
- // Metadata is the struct that will contain extra metadata about
- // the decoding. If this is nil, then no metadata will be tracked.
- Metadata *Metadata
-
- // Result is a pointer to the struct that will contain the decoded
- // value.
- Result any
-
- // The tag name that mapstructure reads for field names. This
- // defaults to "mapstructure"
- TagName string
-
- // The option of the value in the tag that indicates a field should
- // be squashed. This defaults to "squash".
- SquashTagOption string
-
- // IgnoreUntaggedFields ignores all struct fields without explicit
- // TagName, comparable to `mapstructure:"-"` as default behaviour.
- IgnoreUntaggedFields bool
-
- // MatchName is the function used to match the map key to the struct
- // field name or tag. Defaults to `strings.EqualFold`. This can be used
- // to implement case-sensitive tag values, support snake casing, etc.
- MatchName func(mapKey, fieldName string) bool
-
- // DecodeNil, if set to true, will cause the DecodeHook (if present) to run
- // even if the input is nil. This can be used to provide default values.
- DecodeNil bool
-}
-
-// A Decoder takes a raw interface value and turns it into structured
-// data, keeping track of rich error information along the way in case
-// anything goes wrong. Unlike the basic top-level Decode method, you can
-// more finely control how the Decoder behaves using the DecoderConfig
-// structure. The top-level Decode method is just a convenience that sets
-// up the most basic Decoder.
-type Decoder struct {
- config *DecoderConfig
- cachedDecodeHook func(from reflect.Value, to reflect.Value) (any, error)
-}
-
-// Metadata contains information about decoding a structure that
-// is tedious or difficult to get otherwise.
-type Metadata struct {
- // Keys are the keys of the structure which were successfully decoded
- Keys []string
-
- // Unused is a slice of keys that were found in the raw value but
- // weren't decoded since there was no matching field in the result interface
- Unused []string
-
- // Unset is a slice of field names that were found in the result interface
- // but weren't set in the decoding process since there was no matching value
- // in the input
- Unset []string
-}
-
-// Decode takes an input structure and uses reflection to translate it to
-// the output structure. output must be a pointer to a map or struct.
-func Decode(input any, output any) error {
- config := &DecoderConfig{
- Metadata: nil,
- Result: output,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// WeakDecode is the same as Decode but is shorthand to enable
-// WeaklyTypedInput. See DecoderConfig for more info.
-func WeakDecode(input, output any) error {
- config := &DecoderConfig{
- Metadata: nil,
- Result: output,
- WeaklyTypedInput: true,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// DecodeMetadata is the same as Decode, but is shorthand to
-// enable metadata collection. See DecoderConfig for more info.
-func DecodeMetadata(input any, output any, metadata *Metadata) error {
- config := &DecoderConfig{
- Metadata: metadata,
- Result: output,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// WeakDecodeMetadata is the same as Decode, but is shorthand to
-// enable both WeaklyTypedInput and metadata collection. See
-// DecoderConfig for more info.
-func WeakDecodeMetadata(input any, output any, metadata *Metadata) error {
- config := &DecoderConfig{
- Metadata: metadata,
- Result: output,
- WeaklyTypedInput: true,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// NewDecoder returns a new decoder for the given configuration. Once
-// a decoder has been returned, the same configuration must not be used
-// again.
-func NewDecoder(config *DecoderConfig) (*Decoder, error) {
- val := reflect.ValueOf(config.Result)
- if val.Kind() != reflect.Ptr {
- return nil, errors.New("result must be a pointer")
- }
-
- val = val.Elem()
- if !val.CanAddr() {
- return nil, errors.New("result must be addressable (a pointer)")
- }
-
- if config.Metadata != nil {
- if config.Metadata.Keys == nil {
- config.Metadata.Keys = make([]string, 0)
- }
-
- if config.Metadata.Unused == nil {
- config.Metadata.Unused = make([]string, 0)
- }
-
- if config.Metadata.Unset == nil {
- config.Metadata.Unset = make([]string, 0)
- }
- }
-
- if config.TagName == "" {
- config.TagName = "mapstructure"
- }
-
- if config.SquashTagOption == "" {
- config.SquashTagOption = "squash"
- }
-
- if config.MatchName == nil {
- config.MatchName = strings.EqualFold
- }
-
- result := &Decoder{
- config: config,
- }
- if config.DecodeHook != nil {
- result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook)
- }
-
- return result, nil
-}
-
-// Decode decodes the given raw interface to the target pointer specified
-// by the configuration.
-func (d *Decoder) Decode(input any) error {
- err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
-
- // Retain some of the original behavior when multiple errors ocurr
- var joinedErr interface{ Unwrap() []error }
- if errors.As(err, &joinedErr) {
- return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err)
- }
-
- return err
-}
-
-// isNil returns true if the input is nil or a typed nil pointer.
-func isNil(input any) bool {
- if input == nil {
- return true
- }
- val := reflect.ValueOf(input)
- return val.Kind() == reflect.Ptr && val.IsNil()
-}
-
-// Decodes an unknown data type into a specific reflection value.
-func (d *Decoder) decode(name string, input any, outVal reflect.Value) error {
- var (
- inputVal = reflect.ValueOf(input)
- outputKind = getKind(outVal)
- decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil
- )
- if isNil(input) {
- // Typed nils won't match the "input == nil" below, so reset input.
- input = nil
- }
- if input == nil {
- // If the data is nil, then we don't set anything, unless ZeroFields is set
- // to true.
- if d.config.ZeroFields {
- outVal.Set(reflect.Zero(outVal.Type()))
-
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
- }
- if !decodeNil {
- return nil
- }
- }
- if !inputVal.IsValid() {
- if !decodeNil {
- // If the input value is invalid, then we just set the value
- // to be the zero value.
- outVal.Set(reflect.Zero(outVal.Type()))
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
- return nil
- }
- // Hooks need a valid inputVal, so reset it to zero value of outVal type.
- switch outputKind {
- case reflect.Struct, reflect.Map:
- var mapVal map[string]any
- inputVal = reflect.ValueOf(mapVal) // create nil map pointer
- case reflect.Slice, reflect.Array:
- var sliceVal []any
- inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer
- default:
- inputVal = reflect.Zero(outVal.Type())
- }
- }
-
- if d.cachedDecodeHook != nil {
- // We have a DecodeHook, so let's pre-process the input.
- var err error
- input, err = d.cachedDecodeHook(inputVal, outVal)
- if err != nil {
- return newDecodeError(name, err)
- }
- }
- if isNil(input) {
- return nil
- }
-
- var err error
- addMetaKey := true
- switch outputKind {
- case reflect.Bool:
- err = d.decodeBool(name, input, outVal)
- case reflect.Interface:
- err = d.decodeBasic(name, input, outVal)
- case reflect.String:
- err = d.decodeString(name, input, outVal)
- case reflect.Int:
- err = d.decodeInt(name, input, outVal)
- case reflect.Uint:
- err = d.decodeUint(name, input, outVal)
- case reflect.Float32:
- err = d.decodeFloat(name, input, outVal)
- case reflect.Complex64:
- err = d.decodeComplex(name, input, outVal)
- case reflect.Struct:
- err = d.decodeStruct(name, input, outVal)
- case reflect.Map:
- err = d.decodeMap(name, input, outVal)
- case reflect.Ptr:
- addMetaKey, err = d.decodePtr(name, input, outVal)
- case reflect.Slice:
- err = d.decodeSlice(name, input, outVal)
- case reflect.Array:
- err = d.decodeArray(name, input, outVal)
- case reflect.Func:
- err = d.decodeFunc(name, input, outVal)
- default:
- // If we reached this point then we weren't able to decode it
- return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind))
- }
-
- // If we reached here, then we successfully decoded SOMETHING, so
- // mark the key as used if we're tracking metainput.
- if addMetaKey && d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
-
- return err
-}
-
-// This decodes a basic type (bool, int, string, etc.) and sets the
-// value to "data" of that type.
-func (d *Decoder) decodeBasic(name string, data any, val reflect.Value) error {
- if val.IsValid() && val.Elem().IsValid() {
- elem := val.Elem()
-
- // If we can't address this element, then its not writable. Instead,
- // we make a copy of the value (which is a pointer and therefore
- // writable), decode into that, and replace the whole value.
- copied := false
- if !elem.CanAddr() {
- copied = true
-
- // Make *T
- copy := reflect.New(elem.Type())
-
- // *T = elem
- copy.Elem().Set(elem)
-
- // Set elem so we decode into it
- elem = copy
- }
-
- // Decode. If we have an error then return. We also return right
- // away if we're not a copy because that means we decoded directly.
- if err := d.decode(name, data, elem); err != nil || !copied {
- return err
- }
-
- // If we're a copy, we need to set te final result
- val.Set(elem.Elem())
- return nil
- }
-
- dataVal := reflect.ValueOf(data)
-
- // If the input data is a pointer, and the assigned type is the dereference
- // of that exact pointer, then indirect it so that we can assign it.
- // Example: *string to string
- if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
- dataVal = reflect.Indirect(dataVal)
- }
-
- if !dataVal.IsValid() {
- dataVal = reflect.Zero(val.Type())
- }
-
- dataValType := dataVal.Type()
- if !dataValType.AssignableTo(val.Type()) {
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-
- val.Set(dataVal)
- return nil
-}
-
-func (d *Decoder) decodeString(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- converted := true
- switch {
- case dataKind == reflect.String:
- val.SetString(dataVal.String())
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetString("1")
- } else {
- val.SetString("0")
- }
- case dataKind == reflect.Int && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatInt(dataVal.Int(), 10))
- case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
- case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
- case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
- dataKind == reflect.Array && d.config.WeaklyTypedInput:
- dataType := dataVal.Type()
- elemKind := dataType.Elem().Kind()
- switch elemKind {
- case reflect.Uint8:
- var uints []uint8
- if dataKind == reflect.Array {
- uints = make([]uint8, dataVal.Len(), dataVal.Len())
- for i := range uints {
- uints[i] = dataVal.Index(i).Interface().(uint8)
- }
- } else {
- uints = dataVal.Interface().([]uint8)
- }
- val.SetString(string(uints))
- default:
- converted = false
- }
- default:
- converted = false
- }
-
- if !converted {
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-
- return nil
-}
-
-func (d *Decoder) decodeInt(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- val.SetInt(dataVal.Int())
- case dataKind == reflect.Uint:
- val.SetInt(int64(dataVal.Uint()))
- case dataKind == reflect.Float32:
- val.SetInt(int64(dataVal.Float()))
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetInt(1)
- } else {
- val.SetInt(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- str := dataVal.String()
- if str == "" {
- str = "0"
- }
-
- i, err := strconv.ParseInt(str, 0, val.Type().Bits())
- if err == nil {
- val.SetInt(i)
- } else {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: wrapStrconvNumError(err),
- })
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := jn.Int64()
- if err != nil {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: err,
- })
- }
- val.SetInt(i)
- default:
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-
- return nil
-}
-
-func (d *Decoder) decodeUint(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- i := dataVal.Int()
- if i < 0 && !d.config.WeaklyTypedInput {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: fmt.Errorf("%d overflows uint", i),
- })
- }
- val.SetUint(uint64(i))
- case dataKind == reflect.Uint:
- val.SetUint(dataVal.Uint())
- case dataKind == reflect.Float32:
- f := dataVal.Float()
- if f < 0 && !d.config.WeaklyTypedInput {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: fmt.Errorf("%f overflows uint", f),
- })
- }
- val.SetUint(uint64(f))
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetUint(1)
- } else {
- val.SetUint(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- str := dataVal.String()
- if str == "" {
- str = "0"
- }
-
- i, err := strconv.ParseUint(str, 0, val.Type().Bits())
- if err == nil {
- val.SetUint(i)
- } else {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: wrapStrconvNumError(err),
- })
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := strconv.ParseUint(string(jn), 0, 64)
- if err != nil {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: wrapStrconvNumError(err),
- })
- }
- val.SetUint(i)
- default:
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-
- return nil
-}
-
-func (d *Decoder) decodeBool(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- switch {
- case dataKind == reflect.Bool:
- val.SetBool(dataVal.Bool())
- case dataKind == reflect.Int && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Int() != 0)
- case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Uint() != 0)
- case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Float() != 0)
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- b, err := strconv.ParseBool(dataVal.String())
- if err == nil {
- val.SetBool(b)
- } else if dataVal.String() == "" {
- val.SetBool(false)
- } else {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: wrapStrconvNumError(err),
- })
- }
- default:
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-
- return nil
-}
-
-func (d *Decoder) decodeFloat(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- val.SetFloat(float64(dataVal.Int()))
- case dataKind == reflect.Uint:
- val.SetFloat(float64(dataVal.Uint()))
- case dataKind == reflect.Float32:
- val.SetFloat(dataVal.Float())
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetFloat(1)
- } else {
- val.SetFloat(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- str := dataVal.String()
- if str == "" {
- str = "0"
- }
-
- f, err := strconv.ParseFloat(str, val.Type().Bits())
- if err == nil {
- val.SetFloat(f)
- } else {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: wrapStrconvNumError(err),
- })
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := jn.Float64()
- if err != nil {
- return newDecodeError(name, &ParseError{
- Expected: val,
- Value: data,
- Err: err,
- })
- }
- val.SetFloat(i)
- default:
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-
- return nil
-}
-
-func (d *Decoder) decodeComplex(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- switch {
- case dataKind == reflect.Complex64:
- val.SetComplex(dataVal.Complex())
- default:
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMap(name string, data any, val reflect.Value) error {
- valType := val.Type()
- valKeyType := valType.Key()
- valElemType := valType.Elem()
-
- // By default we overwrite keys in the current map
- valMap := val
-
- // If the map is nil or we're purposely zeroing fields, make a new map
- if valMap.IsNil() || d.config.ZeroFields {
- // Make a new map to hold our result
- mapType := reflect.MapOf(valKeyType, valElemType)
- valMap = reflect.MakeMap(mapType)
- }
-
- dataVal := reflect.ValueOf(data)
-
- // Resolve any levels of indirection
- for dataVal.Kind() == reflect.Pointer {
- dataVal = reflect.Indirect(dataVal)
- }
-
- // Check input type and based on the input type jump to the proper func
- switch dataVal.Kind() {
- case reflect.Map:
- return d.decodeMapFromMap(name, dataVal, val, valMap)
-
- case reflect.Struct:
- return d.decodeMapFromStruct(name, dataVal, val, valMap)
-
- case reflect.Array, reflect.Slice:
- if d.config.WeaklyTypedInput {
- return d.decodeMapFromSlice(name, dataVal, val, valMap)
- }
-
- fallthrough
-
- default:
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
-}
-
-func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- // Special case for BC reasons (covered by tests)
- if dataVal.Len() == 0 {
- val.Set(valMap)
- return nil
- }
-
- for i := 0; i < dataVal.Len(); i++ {
- err := d.decode(
- name+"["+strconv.Itoa(i)+"]",
- dataVal.Index(i).Interface(), val)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- valType := val.Type()
- valKeyType := valType.Key()
- valElemType := valType.Elem()
-
- // Accumulate errors
- var errs []error
-
- // If the input data is empty, then we just match what the input data is.
- if dataVal.Len() == 0 {
- if dataVal.IsNil() {
- if !val.IsNil() {
- val.Set(dataVal)
- }
- } else {
- // Set to empty allocated value
- val.Set(valMap)
- }
-
- return nil
- }
-
- for _, k := range dataVal.MapKeys() {
- fieldName := name + "[" + k.String() + "]"
-
- // First decode the key into the proper type
- currentKey := reflect.Indirect(reflect.New(valKeyType))
- if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
- errs = append(errs, err)
- continue
- }
-
- // Next decode the data into the proper type
- v := dataVal.MapIndex(k).Interface()
- currentVal := reflect.Indirect(reflect.New(valElemType))
- if err := d.decode(fieldName, v, currentVal); err != nil {
- errs = append(errs, err)
- continue
- }
-
- valMap.SetMapIndex(currentKey, currentVal)
- }
-
- // Set the built up map to the value
- val.Set(valMap)
-
- return errors.Join(errs...)
-}
-
-func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- typ := dataVal.Type()
- for i := 0; i < typ.NumField(); i++ {
- // Get the StructField first since this is a cheap operation. If the
- // field is unexported, then ignore it.
- f := typ.Field(i)
- if f.PkgPath != "" {
- continue
- }
-
- // Next get the actual value of this field and verify it is assignable
- // to the map value.
- v := dataVal.Field(i)
- if !v.Type().AssignableTo(valMap.Type().Elem()) {
- return newDecodeError(
- name+"."+f.Name,
- fmt.Errorf("cannot assign type %q to map value field of type %q", v.Type(), valMap.Type().Elem()),
- )
- }
-
- tagValue := f.Tag.Get(d.config.TagName)
- keyName := f.Name
-
- if tagValue == "" && d.config.IgnoreUntaggedFields {
- continue
- }
-
- // If Squash is set in the config, we squash the field down.
- squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
-
- v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
-
- // Determine the name of the key in the map
- if index := strings.Index(tagValue, ","); index != -1 {
- if tagValue[:index] == "-" {
- continue
- }
- // If "omitempty" is specified in the tag, it ignores empty values.
- if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
- continue
- }
-
- // If "omitzero" is specified in the tag, it ignores zero values.
- if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() {
- continue
- }
-
- // If "squash" is specified in the tag, we squash the field down.
- squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption)
- if squash {
- // When squashing, the embedded type can be a pointer to a struct.
- if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
- v = v.Elem()
- }
-
- // The final type must be a struct
- if v.Kind() != reflect.Struct {
- return newDecodeError(
- name+"."+f.Name,
- fmt.Errorf("cannot squash non-struct type %q", v.Type()),
- )
- }
- } else {
- if strings.Index(tagValue[index+1:], "remain") != -1 {
- if v.Kind() != reflect.Map {
- return newDecodeError(
- name+"."+f.Name,
- fmt.Errorf("error remain-tag field with invalid type: %q", v.Type()),
- )
- }
-
- ptr := v.MapRange()
- for ptr.Next() {
- valMap.SetMapIndex(ptr.Key(), ptr.Value())
- }
- continue
- }
- }
- if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
- keyName = keyNameTagValue
- }
- } else if len(tagValue) > 0 {
- if tagValue == "-" {
- continue
- }
- keyName = tagValue
- }
-
- switch v.Kind() {
- // this is an embedded struct, so handle it differently
- case reflect.Struct:
- x := reflect.New(v.Type())
- x.Elem().Set(v)
-
- vType := valMap.Type()
- vKeyType := vType.Key()
- vElemType := vType.Elem()
- mType := reflect.MapOf(vKeyType, vElemType)
- vMap := reflect.MakeMap(mType)
-
- // Creating a pointer to a map so that other methods can completely
- // overwrite the map if need be (looking at you decodeMapFromMap). The
- // indirection allows the underlying map to be settable (CanSet() == true)
- // where as reflect.MakeMap returns an unsettable map.
- addrVal := reflect.New(vMap.Type())
- reflect.Indirect(addrVal).Set(vMap)
-
- err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
- if err != nil {
- return err
- }
-
- // the underlying map may have been completely overwritten so pull
- // it indirectly out of the enclosing value.
- vMap = reflect.Indirect(addrVal)
-
- if squash {
- for _, k := range vMap.MapKeys() {
- valMap.SetMapIndex(k, vMap.MapIndex(k))
- }
- } else {
- valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
- }
-
- default:
- valMap.SetMapIndex(reflect.ValueOf(keyName), v)
- }
- }
-
- if val.CanAddr() {
- val.Set(valMap)
- }
-
- return nil
-}
-
-func (d *Decoder) decodePtr(name string, data any, val reflect.Value) (bool, error) {
- // If the input data is nil, then we want to just set the output
- // pointer to be nil as well.
- isNil := data == nil
- if !isNil {
- switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
- case reflect.Chan,
- reflect.Func,
- reflect.Interface,
- reflect.Map,
- reflect.Ptr,
- reflect.Slice:
- isNil = v.IsNil()
- }
- }
- if isNil {
- if !val.IsNil() && val.CanSet() {
- nilValue := reflect.New(val.Type()).Elem()
- val.Set(nilValue)
- }
-
- return true, nil
- }
-
- // Create an element of the concrete (non pointer) type and decode
- // into that. Then set the value of the pointer to this type.
- valType := val.Type()
- valElemType := valType.Elem()
- if val.CanSet() {
- realVal := val
- if realVal.IsNil() || d.config.ZeroFields {
- realVal = reflect.New(valElemType)
- }
-
- if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
- return false, err
- }
-
- val.Set(realVal)
- } else {
- if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
- return false, err
- }
- }
- return false, nil
-}
-
-func (d *Decoder) decodeFunc(name string, data any, val reflect.Value) error {
- // Create an element of the concrete (non pointer) type and decode
- // into that. Then set the value of the pointer to this type.
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- if val.Type() != dataVal.Type() {
- return newDecodeError(name, &UnconvertibleTypeError{
- Expected: val,
- Value: data,
- })
- }
- val.Set(dataVal)
- return nil
-}
-
-func (d *Decoder) decodeSlice(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataValKind := dataVal.Kind()
- valType := val.Type()
- valElemType := valType.Elem()
- sliceType := reflect.SliceOf(valElemType)
-
- // If we have a non array/slice type then we first attempt to convert.
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- if d.config.WeaklyTypedInput {
- switch {
- // Slice and array we use the normal logic
- case dataValKind == reflect.Slice, dataValKind == reflect.Array:
- break
-
- // Empty maps turn into empty slices
- case dataValKind == reflect.Map:
- if dataVal.Len() == 0 {
- val.Set(reflect.MakeSlice(sliceType, 0, 0))
- return nil
- }
- // Create slice of maps of other sizes
- return d.decodeSlice(name, []any{data}, val)
-
- case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
- return d.decodeSlice(name, []byte(dataVal.String()), val)
-
- // All other types we try to convert to the slice type
- // and "lift" it into it. i.e. a string becomes a string slice.
- default:
- // Just re-try this function with data as a slice.
- return d.decodeSlice(name, []any{data}, val)
- }
- }
-
- return newDecodeError(name,
- fmt.Errorf("source data must be an array or slice, got %s", dataValKind))
- }
-
- // If the input value is nil, then don't allocate since empty != nil
- if dataValKind != reflect.Array && dataVal.IsNil() {
- return nil
- }
-
- valSlice := val
- if valSlice.IsNil() || d.config.ZeroFields {
- // Make a new slice to hold our result, same size as the original data.
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
- } else if valSlice.Len() > dataVal.Len() {
- valSlice = valSlice.Slice(0, dataVal.Len())
- }
-
- // Accumulate any errors
- var errs []error
-
- for i := 0; i < dataVal.Len(); i++ {
- currentData := dataVal.Index(i).Interface()
- for valSlice.Len() <= i {
- valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
- }
- currentField := valSlice.Index(i)
-
- fieldName := name + "[" + strconv.Itoa(i) + "]"
- if err := d.decode(fieldName, currentData, currentField); err != nil {
- errs = append(errs, err)
- }
- }
-
- // Finally, set the value to the slice we built up
- val.Set(valSlice)
-
- return errors.Join(errs...)
-}
-
-func (d *Decoder) decodeArray(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataValKind := dataVal.Kind()
- valType := val.Type()
- valElemType := valType.Elem()
- arrayType := reflect.ArrayOf(valType.Len(), valElemType)
-
- valArray := val
-
- if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
- // Check input type
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- if d.config.WeaklyTypedInput {
- switch {
- // Empty maps turn into empty arrays
- case dataValKind == reflect.Map:
- if dataVal.Len() == 0 {
- val.Set(reflect.Zero(arrayType))
- return nil
- }
-
- // All other types we try to convert to the array type
- // and "lift" it into it. i.e. a string becomes a string array.
- default:
- // Just re-try this function with data as a slice.
- return d.decodeArray(name, []any{data}, val)
- }
- }
-
- return newDecodeError(name,
- fmt.Errorf("source data must be an array or slice, got %s", dataValKind))
-
- }
- if dataVal.Len() > arrayType.Len() {
- return newDecodeError(name,
- fmt.Errorf("expected source data to have length less or equal to %d, got %d", arrayType.Len(), dataVal.Len()))
- }
-
- // Make a new array to hold our result, same size as the original data.
- valArray = reflect.New(arrayType).Elem()
- }
-
- // Accumulate any errors
- var errs []error
-
- for i := 0; i < dataVal.Len(); i++ {
- currentData := dataVal.Index(i).Interface()
- currentField := valArray.Index(i)
-
- fieldName := name + "[" + strconv.Itoa(i) + "]"
- if err := d.decode(fieldName, currentData, currentField); err != nil {
- errs = append(errs, err)
- }
- }
-
- // Finally, set the value to the array we built up
- val.Set(valArray)
-
- return errors.Join(errs...)
-}
-
-func (d *Decoder) decodeStruct(name string, data any, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
-
- // If the type of the value to write to and the data match directly,
- // then we just set it directly instead of recursing into the structure.
- if dataVal.Type() == val.Type() {
- val.Set(dataVal)
- return nil
- }
-
- dataValKind := dataVal.Kind()
- switch dataValKind {
- case reflect.Map:
- return d.decodeStructFromMap(name, dataVal, val)
-
- case reflect.Struct:
- // Not the most efficient way to do this but we can optimize later if
- // we want to. To convert from struct to struct we go to map first
- // as an intermediary.
-
- // Make a new map to hold our result
- mapType := reflect.TypeOf((map[string]any)(nil))
- mval := reflect.MakeMap(mapType)
-
- // Creating a pointer to a map so that other methods can completely
- // overwrite the map if need be (looking at you decodeMapFromMap). The
- // indirection allows the underlying map to be settable (CanSet() == true)
- // where as reflect.MakeMap returns an unsettable map.
- addrVal := reflect.New(mval.Type())
-
- reflect.Indirect(addrVal).Set(mval)
- if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
- return err
- }
-
- result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
- return result
-
- default:
- return newDecodeError(name,
- fmt.Errorf("expected a map or struct, got %q", dataValKind))
- }
-}
-
-func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
- dataValType := dataVal.Type()
- if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
- return newDecodeError(name,
- fmt.Errorf("needs a map with string keys, has %q keys", kind))
- }
-
- dataValKeys := make(map[reflect.Value]struct{})
- dataValKeysUnused := make(map[any]struct{})
- for _, dataValKey := range dataVal.MapKeys() {
- dataValKeys[dataValKey] = struct{}{}
- dataValKeysUnused[dataValKey.Interface()] = struct{}{}
- }
-
- targetValKeysUnused := make(map[any]struct{})
-
- var errs []error
-
- // This slice will keep track of all the structs we'll be decoding.
- // There can be more than one struct if there are embedded structs
- // that are squashed.
- structs := make([]reflect.Value, 1, 5)
- structs[0] = val
-
- // Compile the list of all the fields that we're going to be decoding
- // from all the structs.
- type field struct {
- field reflect.StructField
- val reflect.Value
- }
-
- // remainField is set to a valid field set with the "remain" tag if
- // we are keeping track of remaining values.
- var remainField *field
-
- fields := []field{}
- for len(structs) > 0 {
- structVal := structs[0]
- structs = structs[1:]
-
- structType := structVal.Type()
-
- for i := 0; i < structType.NumField(); i++ {
- fieldType := structType.Field(i)
- fieldVal := structVal.Field(i)
- if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
- // Handle embedded struct pointers as embedded structs.
- fieldVal = fieldVal.Elem()
- }
-
- // If "squash" is specified in the tag, we squash the field down.
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
- remain := false
-
- // We always parse the tags cause we're looking for other tags too
- tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
- for _, tag := range tagParts[1:] {
- if tag == d.config.SquashTagOption {
- squash = true
- break
- }
-
- if tag == "remain" {
- remain = true
- break
- }
- }
-
- if squash {
- switch fieldVal.Kind() {
- case reflect.Struct:
- structs = append(structs, fieldVal)
- case reflect.Interface:
- if !fieldVal.IsNil() {
- structs = append(structs, fieldVal.Elem().Elem())
- }
- default:
- errs = append(errs, newDecodeError(
- name+"."+fieldType.Name,
- fmt.Errorf("unsupported type for squash: %s", fieldVal.Kind()),
- ))
- }
- continue
- }
-
- // Build our field
- if remain {
- remainField = &field{fieldType, fieldVal}
- } else {
- // Normal struct field, store it away
- fields = append(fields, field{fieldType, fieldVal})
- }
- }
- }
-
- // for fieldType, field := range fields {
- for _, f := range fields {
- field, fieldValue := f.field, f.val
- fieldName := field.Name
-
- tagValue := field.Tag.Get(d.config.TagName)
- if tagValue == "" && d.config.IgnoreUntaggedFields {
- continue
- }
- tagValue = strings.SplitN(tagValue, ",", 2)[0]
- if tagValue != "" {
- fieldName = tagValue
- }
-
- rawMapKey := reflect.ValueOf(fieldName)
- rawMapVal := dataVal.MapIndex(rawMapKey)
- if !rawMapVal.IsValid() {
- // Do a slower search by iterating over each key and
- // doing case-insensitive search.
- for dataValKey := range dataValKeys {
- mK, ok := dataValKey.Interface().(string)
- if !ok {
- // Not a string key
- continue
- }
-
- if d.config.MatchName(mK, fieldName) {
- rawMapKey = dataValKey
- rawMapVal = dataVal.MapIndex(dataValKey)
- break
- }
- }
-
- if !rawMapVal.IsValid() {
- // There was no matching key in the map for the value in
- // the struct. Remember it for potential errors and metadata.
- if !(d.config.AllowUnsetPointer && fieldValue.Kind() == reflect.Ptr) {
- targetValKeysUnused[fieldName] = struct{}{}
- }
- continue
- }
- }
-
- if !fieldValue.IsValid() {
- // This should never happen
- panic("field is not valid")
- }
-
- // If we can't set the field, then it is unexported or something,
- // and we just continue onwards.
- if !fieldValue.CanSet() {
- continue
- }
-
- // Delete the key we're using from the unused map so we stop tracking
- delete(dataValKeysUnused, rawMapKey.Interface())
-
- // If the name is empty string, then we're at the root, and we
- // don't dot-join the fields.
- if name != "" {
- fieldName = name + "." + fieldName
- }
-
- if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
- errs = append(errs, err)
- }
- }
-
- // If we have a "remain"-tagged field and we have unused keys then
- // we put the unused keys directly into the remain field.
- if remainField != nil && len(dataValKeysUnused) > 0 {
- // Build a map of only the unused values
- remain := map[any]any{}
- for key := range dataValKeysUnused {
- remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
- }
-
- // Decode it as-if we were just decoding this map onto our map.
- if err := d.decodeMap(name, remain, remainField.val); err != nil {
- errs = append(errs, err)
- }
-
- // Set the map to nil so we have none so that the next check will
- // not error (ErrorUnused)
- dataValKeysUnused = nil
- }
-
- if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
- keys := make([]string, 0, len(dataValKeysUnused))
- for rawKey := range dataValKeysUnused {
- keys = append(keys, rawKey.(string))
- }
- sort.Strings(keys)
-
- errs = append(errs, newDecodeError(
- name,
- fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")),
- ))
- }
-
- if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
- keys := make([]string, 0, len(targetValKeysUnused))
- for rawKey := range targetValKeysUnused {
- keys = append(keys, rawKey.(string))
- }
- sort.Strings(keys)
-
- errs = append(errs, newDecodeError(
- name,
- fmt.Errorf("has unset fields: %s", strings.Join(keys, ", ")),
- ))
- }
-
- if err := errors.Join(errs...); err != nil {
- return err
- }
-
- // Add the unused keys to the list of unused keys if we're tracking metadata
- if d.config.Metadata != nil {
- for rawKey := range dataValKeysUnused {
- key := rawKey.(string)
- if name != "" {
- key = name + "." + key
- }
-
- d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
- }
- for rawKey := range targetValKeysUnused {
- key := rawKey.(string)
- if name != "" {
- key = name + "." + key
- }
-
- d.config.Metadata.Unset = append(d.config.Metadata.Unset, key)
- }
- }
-
- return nil
-}
-
-func isEmptyValue(v reflect.Value) bool {
- switch getKind(v) {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
- return false
-}
-
-func getKind(val reflect.Value) reflect.Kind {
- kind := val.Kind()
-
- switch {
- case kind >= reflect.Int && kind <= reflect.Int64:
- return reflect.Int
- case kind >= reflect.Uint && kind <= reflect.Uint64:
- return reflect.Uint
- case kind >= reflect.Float32 && kind <= reflect.Float64:
- return reflect.Float32
- case kind >= reflect.Complex64 && kind <= reflect.Complex128:
- return reflect.Complex64
- default:
- return kind
- }
-}
-
-func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool {
- for i := 0; i < typ.NumField(); i++ {
- f := typ.Field(i)
- if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
- return true
- }
- if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
- return true
- }
- }
- return false
-}
-
-func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
- if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
- return v
- }
- deref := v.Elem()
- derefT := deref.Type()
- if isStructTypeConvertibleToMap(derefT, true, tagName) {
- return deref
- }
- return v
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
deleted file mode 100644
index d0913fff6..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
+++ /dev/null
@@ -1,44 +0,0 @@
-//go:build !go1.20
-
-package mapstructure
-
-import "reflect"
-
-func isComparable(v reflect.Value) bool {
- k := v.Kind()
- switch k {
- case reflect.Invalid:
- return false
-
- case reflect.Array:
- switch v.Type().Elem().Kind() {
- case reflect.Interface, reflect.Array, reflect.Struct:
- for i := 0; i < v.Type().Len(); i++ {
- // if !v.Index(i).Comparable() {
- if !isComparable(v.Index(i)) {
- return false
- }
- }
- return true
- }
- return v.Type().Comparable()
-
- case reflect.Interface:
- // return v.Elem().Comparable()
- return isComparable(v.Elem())
-
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- return false
-
- // if !v.Field(i).Comparable() {
- if !isComparable(v.Field(i)) {
- return false
- }
- }
- return true
-
- default:
- return v.Type().Comparable()
- }
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
deleted file mode 100644
index f8255a1b1..000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build go1.20
-
-package mapstructure
-
-import "reflect"
-
-// TODO: remove once we drop support for Go <1.20
-func isComparable(v reflect.Value) bool {
- return v.Comparable()
-}
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index 2a7d8c265..8dc247236 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -6,7 +6,12 @@
DNS version 2 is now available at , check it out if you want to
help shape the next 15 years of the Go DNS package.
-The version here will see no new features and less and less development.
+The version here will see no new features and less and less development, and my time (if any) will be fully
+devoted towards v2.
+
+**December 2025**: v2 should be (already) a good replacement, the coming months would be a good time to
+migrate, see [this file describing the
+differences](https://codeberg.org/miekg/dns/src/branch/main/README-diff-with-v1.md), to help you get started.
# Alternative (more granular) approach to a DNS library
@@ -62,7 +67,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
- https://www.dnsperf.com/
- https://dnssectest.net/
- https://github.com/oif/apex
-- https://github.com/jedisct1/dnscrypt-proxy
+- https://github.com/jedisct1/dnscrypt-proxy (migrated to v2)
- https://github.com/jedisct1/rpdns
- https://github.com/xor-gate/sshfp
- https://github.com/rs/dnstrace
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
index 31957b2ea..f7c6525dd 100644
--- a/vendor/github.com/miekg/dns/scan.go
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"io/fs"
+ "math"
"os"
"path"
"path/filepath"
@@ -1231,7 +1232,7 @@ func typeToInt(token string) (uint16, bool) {
// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
func stringToTTL(token string) (uint32, bool) {
- var s, i uint32
+ var s, i uint
for _, c := range token {
switch c {
case 's', 'S':
@@ -1251,12 +1252,15 @@ func stringToTTL(token string) (uint32, bool) {
i = 0
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
i *= 10
- i += uint32(c) - '0'
+ i += uint(c) - '0'
default:
return 0, false
}
}
- return s + i, true
+ if s+i > math.MaxUint32 {
+ return 0, false
+ }
+ return uint32(s + i), true
}
// Parse LOC records' [.][mM] into a
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index c53a63d7c..21f130b08 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = v{1, 1, 69}
+var Version = v{1, 1, 70}
// v holds the version of this library.
type v struct {
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index c34c7de43..4e4c13e72 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -45,6 +45,7 @@ const (
// The Content-Type values for the different wire protocols. Do not do direct
// comparisons to these constants, instead use the comparison functions.
+ //
// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
FmtUnknown Format = ``
// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index 0290f6abc..872c0c15b 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -13,7 +13,6 @@
// Build only when actually fuzzing
//go:build gofuzz
-// +build gofuzz
package expfmt
diff --git a/vendor/github.com/quic-go/quic-go/README.md b/vendor/github.com/quic-go/quic-go/README.md
index 53bbf4a0a..85751dbe7 100644
--- a/vendor/github.com/quic-go/quic-go/README.md
+++ b/vendor/github.com/quic-go/quic-go/README.md
@@ -1,6 +1,9 @@
+
+

+
+
# A QUIC implementation in pure Go
-
[](https://quic-go.net/docs/)
[](https://pkg.go.dev/github.com/quic-go/quic-go)
@@ -52,3 +55,7 @@ quic-go always aims to support the latest two Go releases.
## Contributing
We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.
+
+## License
+
+The code is licensed under the MIT license. The logo and brand assets are excluded from the MIT license. See [assets/LICENSE.md](https://github.com/quic-go/quic-go/tree/master/assets/LICENSE.md) for the full usage policy and details.
diff --git a/vendor/github.com/quic-go/quic-go/SECURITY.md b/vendor/github.com/quic-go/quic-go/SECURITY.md
index c24c08f86..79fe1f568 100644
--- a/vendor/github.com/quic-go/quic-go/SECURITY.md
+++ b/vendor/github.com/quic-go/quic-go/SECURITY.md
@@ -1,19 +1,14 @@
# Security Policy
-quic-go still in development. This means that there may be problems in our protocols,
-or there may be mistakes in our implementations.
-We take security vulnerabilities very seriously. If you discover a security issue,
-please bring it to our attention right away!
+quic-go is an implementation of the QUIC protocol and related standards. No software is perfect, and we take reports of potential security issues very seriously.
## Reporting a Vulnerability
-If you find a vulnerability that may affect live deployments -- for example, by exposing
-a remote execution exploit -- please [**report privately**](https://github.com/quic-go/quic-go/security/advisories/new).
-Please **DO NOT file a public issue**.
+If you discover a vulnerability that could affect production deployments (e.g., a remotely exploitable issue), please report it [**privately**](https://github.com/quic-go/quic-go/security/advisories/new).
+Please **DO NOT file a public issue** for exploitable vulnerabilities.
-If the issue is an implementation weakness that cannot be immediately exploited or
-something not yet deployed, just discuss it openly.
+If the issue is theoretical, non-exploitable, or related to an experimental feature, you may discuss it openly by filing a regular issue.
-## Reporting a non security bug
+## Reporting a non-security bug
-For non-security bugs, please simply file a GitHub [issue](https://github.com/quic-go/quic-go/issues/new).
+For bugs, feature requests, or other non-security concerns, please open a GitHub [issue](https://github.com/quic-go/quic-go/issues/new).
diff --git a/vendor/github.com/quic-go/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go
index faf9f1101..1cb396fcc 100644
--- a/vendor/github.com/quic-go/quic-go/connection.go
+++ b/vendor/github.com/quic-go/quic-go/connection.go
@@ -57,6 +57,11 @@ type receivedPacket struct {
info packetInfo // only valid if the contained IP address is valid
}
+type receivedPacketWithDatagramID struct {
+ receivedPacket
+ datagramID qlog.DatagramID
+}
+
func (p *receivedPacket) Size() protocol.ByteCount { return protocol.ByteCount(len(p.data)) }
func (p *receivedPacket) Clone() *receivedPacket {
@@ -94,9 +99,6 @@ func (e *errCloseForRecreating) Error() string {
var deadlineSendImmediately = monotime.Time(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine
-var connTracingID atomic.Uint64 // to be accessed atomically
-func nextConnTracingID() ConnectionTracingID { return ConnectionTracingID(connTracingID.Add(1)) }
-
type blockMode uint8
const (
@@ -183,8 +185,8 @@ type Conn struct {
ctxCancel context.CancelCauseFunc
handshakeCompleteChan chan struct{}
- undecryptablePackets []receivedPacket // undecryptable packets, waiting for a change in encryption level
- undecryptablePacketsToProcess []receivedPacket
+ undecryptablePackets []receivedPacketWithDatagramID // undecryptable packets, waiting for a change in encryption level
+ undecryptablePacketsToProcess []receivedPacketWithDatagramID
earlyConnReadyChan chan struct{}
sentFirstPacket bool
@@ -308,13 +310,14 @@ var newConnection = func(
)
s.preSetup()
s.rttStats.SetInitialRTT(rtt)
- s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler(
+ s.sentPacketHandler = ackhandler.NewSentPacketHandler(
0,
protocol.ByteCount(s.config.InitialPacketSize),
s.rttStats,
&s.connStats,
clientAddressValidated,
s.conn.capabilities().ECN,
+ s.receivedPacketHandler.IgnorePacketsBelow,
s.perspective,
s.qlogger,
s.logger,
@@ -365,7 +368,7 @@ var newConnection = func(
s.version,
)
s.cryptoStreamHandler = cs
- s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
+ s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, &s.receivedPacketHandler, s.datagramQueue, s.perspective)
s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, s.oneRTTStream)
return &wrappedConn{Conn: s}
@@ -436,13 +439,14 @@ var newClientConnection = func(
)
s.ctx, s.ctxCancel = context.WithCancelCause(ctx)
s.preSetup()
- s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler(
+ s.sentPacketHandler = ackhandler.NewSentPacketHandler(
initialPacketNumber,
protocol.ByteCount(s.config.InitialPacketSize),
s.rttStats,
&s.connStats,
false, // has no effect
s.conn.capabilities().ECN,
+ s.receivedPacketHandler.IgnorePacketsBelow,
s.perspective,
s.qlogger,
s.logger,
@@ -490,7 +494,7 @@ var newClientConnection = func(
s.cryptoStreamHandler = cs
s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, oneRTTStream)
s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
- s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
+ s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, &s.receivedPacketHandler, s.datagramQueue, s.perspective)
if len(tlsConf.ServerName) > 0 {
s.tokenStoreKey = tlsConf.ServerName
} else {
@@ -550,6 +554,8 @@ func (c *Conn) preSetup() {
c.lastPacketReceivedTime = now
c.creationTime = now
+ c.receivedPacketHandler = *ackhandler.NewReceivedPacketHandler(c.logger)
+
c.datagramQueue = newDatagramQueue(c.scheduleSending, c.logger)
c.connState.Version = c.version
}
@@ -615,7 +621,7 @@ runLoop:
queue := c.undecryptablePacketsToProcess
c.undecryptablePacketsToProcess = nil
for _, p := range queue {
- processed, err := c.handleOnePacket(p)
+ processed, err := c.handleOnePacket(p.receivedPacket, p.datagramID)
if err != nil {
c.setCloseError(&closeError{err: err})
break runLoop
@@ -768,10 +774,16 @@ func (c *Conn) supportsDatagrams() bool {
func (c *Conn) ConnectionState() ConnectionState {
c.connStateMutex.Lock()
defer c.connStateMutex.Unlock()
+
cs := c.cryptoStreamHandler.ConnectionState()
c.connState.TLS = cs.ConnectionState
c.connState.Used0RTT = cs.Used0RTT
- c.connState.SupportsStreamResetPartialDelivery = c.peerParams.EnableResetStreamAt
+ if c.peerParams != nil {
+ c.connState.SupportsDatagrams.Remote = c.supportsDatagrams()
+ c.connState.SupportsStreamResetPartialDelivery.Remote = c.peerParams.EnableResetStreamAt
+ }
+ c.connState.SupportsDatagrams.Local = c.config.EnableDatagrams
+ c.connState.SupportsStreamResetPartialDelivery.Local = c.config.EnableStreamResetPartialDelivery
c.connState.GSO = c.conn.capabilities().GSO
return c.connState
}
@@ -986,41 +998,48 @@ func (c *Conn) handleHandshakeConfirmed(now monotime.Time) error {
return nil
}
+const maxPacketsToProcess = 32
+
func (c *Conn) handlePackets() (wasProcessed bool, _ error) {
- // Now process all packets in the receivedPackets channel.
- // Limit the number of packets to the length of the receivedPackets channel,
+ // Process packets from the receivedPackets queue.
+ // Limit the number of packets to process to maxPacketsToProcess,
// so we eventually get a chance to send out an ACK when receiving a lot of packets.
c.receivedPacketMx.Lock()
- numPackets := c.receivedPackets.Len()
- if numPackets == 0 {
+
+ if c.receivedPackets.Empty() {
c.receivedPacketMx.Unlock()
return false, nil
}
var hasMorePackets bool
- for i := 0; i < numPackets; i++ {
- if i > 0 {
- c.receivedPacketMx.Lock()
- }
+ for range maxPacketsToProcess {
p := c.receivedPackets.PopFront()
- hasMorePackets = !c.receivedPackets.Empty()
c.receivedPacketMx.Unlock()
- processed, err := c.handleOnePacket(p)
+ var datagramID qlog.DatagramID
+ if c.qlogger != nil && wire.IsLongHeaderPacket(p.data[0]) {
+ datagramID = qlog.CalculateDatagramID(p.data)
+ }
+ processed, err := c.handleOnePacket(p, datagramID)
if err != nil {
return false, err
}
if processed {
wasProcessed = true
}
+ c.receivedPacketMx.Lock()
+ hasMorePackets = !c.receivedPackets.Empty()
if !hasMorePackets {
break
}
- // only process a single packet at a time before handshake completion
- if !c.handshakeComplete {
+ // Prioritize sending of new CRYPTO data.
+ // This is especially relevant when processing 0-RTT packets.
+ if !c.handshakeComplete && (c.initialStream.HasData() || c.handshakeStream.HasData()) {
break
}
}
+ c.receivedPacketMx.Unlock()
+
if hasMorePackets {
select {
case c.notifyReceivedPacket <- struct{}{}:
@@ -1030,12 +1049,11 @@ func (c *Conn) handlePackets() (wasProcessed bool, _ error) {
return wasProcessed, nil
}
-func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
+func (c *Conn) handleOnePacket(rp receivedPacket, datagramID qlog.DatagramID) (wasProcessed bool, _ error) {
c.sentPacketHandler.ReceivedBytes(rp.Size(), rp.rcvTime)
if wire.IsVersionNegotiationPacket(rp.data) {
- c.handleVersionNegotiationPacket(rp)
- return false, nil
+ return false, c.handleVersionNegotiationPacket(rp)
}
var counter uint8
@@ -1051,8 +1069,9 @@ func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
if err != nil {
if c.qlogger != nil {
c.qlogger.RecordEvent(qlog.PacketDropped{
- Raw: qlog.RawInfo{Length: len(data)},
- Trigger: qlog.PacketDropHeaderParseError,
+ Raw: qlog.RawInfo{Length: len(data)},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropHeaderParseError,
})
}
c.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", err)
@@ -1061,9 +1080,10 @@ func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
if destConnID != lastConnID {
if c.qlogger != nil {
c.qlogger.RecordEvent(qlog.PacketDropped{
- Header: qlog.PacketHeader{DestConnectionID: destConnID},
- Raw: qlog.RawInfo{Length: len(data)},
- Trigger: qlog.PacketDropUnknownConnectionID,
+ Header: qlog.PacketHeader{DestConnectionID: destConnID},
+ Raw: qlog.RawInfo{Length: len(data)},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropUnknownConnectionID,
})
}
c.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", destConnID, lastConnID)
@@ -1077,14 +1097,16 @@ func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
if c.qlogger != nil {
if err == wire.ErrUnsupportedVersion {
c.qlogger.RecordEvent(qlog.PacketDropped{
- Header: qlog.PacketHeader{Version: hdr.Version},
- Raw: qlog.RawInfo{Length: len(data)},
- Trigger: qlog.PacketDropUnsupportedVersion,
+ Header: qlog.PacketHeader{Version: hdr.Version},
+ Raw: qlog.RawInfo{Length: len(data)},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropUnsupportedVersion,
})
} else {
c.qlogger.RecordEvent(qlog.PacketDropped{
- Raw: qlog.RawInfo{Length: len(data)},
- Trigger: qlog.PacketDropHeaderParseError,
+ Raw: qlog.RawInfo{Length: len(data)},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropHeaderParseError,
})
}
}
@@ -1096,8 +1118,9 @@ func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
if hdr.Version != c.version {
if c.qlogger != nil {
c.qlogger.RecordEvent(qlog.PacketDropped{
- Raw: qlog.RawInfo{Length: len(data)},
- Trigger: qlog.PacketDropUnexpectedVersion,
+ Raw: qlog.RawInfo{Length: len(data)},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropUnexpectedVersion,
})
}
c.logger.Debugf("Dropping packet with version %x. Expected %x.", hdr.Version, c.version)
@@ -1116,7 +1139,7 @@ func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
p.data = packetData
- processed, err := c.handleLongHeaderPacket(p, hdr)
+ processed, err := c.handleLongHeaderPacket(p, hdr, datagramID)
if err != nil {
return false, err
}
@@ -1128,7 +1151,7 @@ func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
if counter > 0 {
p.buffer.Split()
}
- processed, err := c.handleShortHeaderPacket(p, counter > 0)
+ processed, err := c.handleShortHeaderPacket(p, counter > 0, datagramID)
if err != nil {
return false, err
}
@@ -1144,7 +1167,11 @@ func (c *Conn) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
return wasProcessed, nil
}
-func (c *Conn) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasProcessed bool, _ error) {
+func (c *Conn) handleShortHeaderPacket(
+ p receivedPacket,
+ isCoalesced bool,
+ datagramID qlog.DatagramID, // only for logging
+) (wasProcessed bool, _ error) {
var wasQueued bool
defer func() {
@@ -1161,8 +1188,9 @@ func (c *Conn) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasP
PacketType: qlog.PacketType1RTT,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: len(p.data)},
- Trigger: qlog.PacketDropHeaderParseError,
+ Raw: qlog.RawInfo{Length: len(p.data)},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropHeaderParseError,
})
return false, nil
}
@@ -1179,7 +1207,7 @@ func (c *Conn) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasP
return false, &StatelessResetError{}
}
}
- wasQueued, err = c.handleUnpackError(err, p, qlog.PacketType1RTT)
+ wasQueued, err = c.handleUnpackError(err, p, qlog.PacketType1RTT, datagramID)
return false, err
}
c.largestRcvdAppData = max(c.largestRcvdAppData, pn)
@@ -1197,8 +1225,9 @@ func (c *Conn) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasP
PacketType: qlog.PacketType1RTT,
PacketNumber: pn,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropDuplicate,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropDuplicate,
})
}
return false, nil
@@ -1218,8 +1247,9 @@ func (c *Conn) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasP
Length: int(p.Size()),
PayloadLength: int(p.Size() - wire.ShortHeaderLen(destConnID, pnLen)),
},
- Frames: frames,
- ECN: toQlogECN(p.ecn),
+ DatagramID: datagramID,
+ Frames: frames,
+ ECN: toQlogECN(p.ecn),
})
}
}
@@ -1251,7 +1281,7 @@ func (c *Conn) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasP
return true, err
}
c.logger.Debugf("sending path probe packet to %s", p.remoteAddr)
- c.logShortHeaderPacket(probe.DestConnID, probe.Ack, probe.Frames, probe.StreamFrames, probe.PacketNumber, probe.PacketNumberLen, probe.KeyPhase, protocol.ECNNon, buf.Len(), false)
+ c.logShortHeaderPacketWithDatagramID(probe, protocol.ECNNon, buf.Len(), false, datagramID)
c.registerPackedShortHeaderPacket(probe, protocol.ECNNon, p.rcvTime)
c.sendQueue.SendProbe(buf, p.remoteAddr)
}
@@ -1275,7 +1305,7 @@ func (c *Conn) handleShortHeaderPacket(p receivedPacket, isCoalesced bool) (wasP
return true, nil
}
-func (c *Conn) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) (wasProcessed bool, _ error) {
+func (c *Conn) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header, datagramID qlog.DatagramID) (wasProcessed bool, _ error) {
var wasQueued bool
defer func() {
@@ -1298,8 +1328,9 @@ func (c *Conn) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) (wasPr
PacketType: qlog.PacketTypeInitial,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropUnknownConnectionID,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropUnknownConnectionID,
})
}
c.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", p.Size(), hdr.SrcConnectionID, c.handshakeDestConnID)
@@ -1313,8 +1344,9 @@ func (c *Conn) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) (wasPr
PacketType: qlog.PacketType0RTT,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropUnexpectedPacket,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropUnexpectedPacket,
})
}
return false, nil
@@ -1322,7 +1354,7 @@ func (c *Conn) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) (wasPr
packet, err := c.unpacker.UnpackLongHeader(hdr, p.data)
if err != nil {
- wasQueued, err = c.handleUnpackError(err, p, toQlogPacketType(hdr.Type))
+ wasQueued, err = c.handleUnpackError(err, p, toQlogPacketType(hdr.Type), datagramID)
return false, err
}
@@ -1342,20 +1374,21 @@ func (c *Conn) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) (wasPr
PacketNumber: pn,
Version: packet.hdr.Version,
},
- Raw: qlog.RawInfo{Length: int(p.Size()), PayloadLength: int(packet.hdr.Length)},
- Trigger: qlog.PacketDropDuplicate,
+ Raw: qlog.RawInfo{Length: int(p.Size()), PayloadLength: int(packet.hdr.Length)},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropDuplicate,
})
}
return false, nil
}
- if err := c.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
+ if err := c.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, datagramID, p.Size()); err != nil {
return false, err
}
return true, nil
}
-func (c *Conn) handleUnpackError(err error, p receivedPacket, pt qlog.PacketType) (wasQueued bool, _ error) {
+func (c *Conn) handleUnpackError(err error, p receivedPacket, pt qlog.PacketType, datagramID qlog.DatagramID) (wasQueued bool, _ error) {
switch err {
case handshake.ErrKeysDropped:
if c.qlogger != nil {
@@ -1366,8 +1399,9 @@ func (c *Conn) handleUnpackError(err error, p receivedPacket, pt qlog.PacketType
DestConnectionID: connID,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropKeyUnavailable,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropKeyUnavailable,
})
}
c.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size())
@@ -1375,7 +1409,7 @@ func (c *Conn) handleUnpackError(err error, p receivedPacket, pt qlog.PacketType
case handshake.ErrKeysNotYetAvailable:
// Sealer for this encryption level not yet available.
// Try again later.
- c.tryQueueingUndecryptablePacket(p, pt)
+ c.tryQueueingUndecryptablePacket(p, pt, datagramID)
return true, nil
case wire.ErrInvalidReservedBits:
return false, &qerr.TransportError{
@@ -1392,8 +1426,9 @@ func (c *Conn) handleUnpackError(err error, p receivedPacket, pt qlog.PacketType
DestConnectionID: connID,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropPayloadDecryptError,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropPayloadDecryptError,
})
}
c.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err)
@@ -1410,8 +1445,9 @@ func (c *Conn) handleUnpackError(err error, p receivedPacket, pt qlog.PacketType
DestConnectionID: connID,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropHeaderParseError,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropHeaderParseError,
})
}
c.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err)
@@ -1529,7 +1565,7 @@ func (c *Conn) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime monotime
return true
}
-func (c *Conn) handleVersionNegotiationPacket(p receivedPacket) {
+func (c *Conn) handleVersionNegotiationPacket(p receivedPacket) error {
if c.perspective == protocol.PerspectiveServer || // servers never receive version negotiation packets
c.receivedFirstPacket || c.versionNegotiated { // ignore delayed / duplicated version negotiation packets
if c.qlogger != nil {
@@ -1539,7 +1575,7 @@ func (c *Conn) handleVersionNegotiationPacket(p receivedPacket) {
Trigger: qlog.PacketDropUnexpectedPacket,
})
}
- return
+ return nil
}
src, dest, supportedVersions, err := wire.ParseVersionNegotiationPacket(p.data)
@@ -1552,7 +1588,7 @@ func (c *Conn) handleVersionNegotiationPacket(p receivedPacket) {
})
}
c.logger.Debugf("Error parsing Version Negotiation packet: %s", err)
- return
+ return nil
}
if slices.Contains(supportedVersions, c.version) {
@@ -1565,7 +1601,7 @@ func (c *Conn) handleVersionNegotiationPacket(p receivedPacket) {
}
// The Version Negotiation packet contains the version that we offered.
// This might be a packet sent by an attacker, or it was corrupted.
- return
+ return nil
}
c.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", supportedVersions)
@@ -1585,7 +1621,7 @@ func (c *Conn) handleVersionNegotiationPacket(p receivedPacket) {
Theirs: supportedVersions,
})
c.logger.Infof("No compatible QUIC version found.")
- return
+ return nil
}
if c.qlogger != nil {
c.qlogger.RecordEvent(qlog.VersionInformation{
@@ -1597,16 +1633,17 @@ func (c *Conn) handleVersionNegotiationPacket(p receivedPacket) {
c.logger.Infof("Switching to QUIC version %s.", newVersion)
nextPN, _ := c.sentPacketHandler.PeekPacketNumber(protocol.EncryptionInitial)
- c.destroyImpl(&errCloseForRecreating{
+ return &errCloseForRecreating{
nextPacketNumber: nextPN,
nextVersion: newVersion,
- })
+ }
}
func (c *Conn) handleUnpackedLongHeaderPacket(
packet *unpackedPacket,
ecn protocol.ECN,
rcvTime monotime.Time,
+ datagramID qlog.DatagramID, // only for logging
packetSize protocol.ByteCount, // only for logging
) error {
if !c.receivedFirstPacket {
@@ -1692,8 +1729,9 @@ func (c *Conn) handleUnpackedLongHeaderPacket(
Length: int(packetSize),
PayloadLength: int(packet.hdr.Length),
},
- Frames: frames,
- ECN: toQlogECN(ecn),
+ DatagramID: datagramID,
+ Frames: frames,
+ ECN: toQlogECN(ecn),
})
}
}
@@ -1701,6 +1739,7 @@ func (c *Conn) handleUnpackedLongHeaderPacket(
if err != nil {
return err
}
+ c.sentPacketHandler.ReceivedPacket(packet.encryptionLevel, rcvTime)
return c.receivedPacketHandler.ReceivedPacket(packet.hdr.PacketNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting)
}
@@ -1720,6 +1759,7 @@ func (c *Conn) handleUnpackedShortHeaderPacket(
if err != nil {
return false, nil, err
}
+ c.sentPacketHandler.ReceivedPacket(protocol.Encryption1RTT, rcvTime)
if err := c.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting); err != nil {
return false, nil, err
}
@@ -1916,9 +1956,14 @@ func (c *Conn) handlePacket(p receivedPacket) {
// the channel size, protocol.MaxConnUnprocessedPackets
if c.receivedPackets.Len() >= protocol.MaxConnUnprocessedPackets {
if c.qlogger != nil {
+ var datagramID qlog.DatagramID
+ if wire.IsLongHeaderPacket(p.data[0]) {
+ datagramID = qlog.CalculateDatagramID(p.data)
+ }
c.qlogger.RecordEvent(qlog.PacketDropped{
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropDOSPrevention,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropDOSPrevention,
})
}
c.receivedPacketMx.Unlock()
@@ -2303,9 +2348,6 @@ func (c *Conn) restoreTransportParameters(params *wire.TransportParameters) {
c.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit)
c.connFlowController.UpdateSendWindow(params.InitialMaxData)
c.streamsMap.HandleTransportParameters(params)
- c.connStateMutex.Lock()
- c.connState.SupportsDatagrams = c.supportsDatagrams()
- c.connStateMutex.Unlock()
}
func (c *Conn) handleTransportParameters(params *wire.TransportParameters) error {
@@ -2335,10 +2377,6 @@ func (c *Conn) handleTransportParameters(params *wire.TransportParameters) error
// the client's transport parameters.
close(c.earlyConnReadyChan)
}
-
- c.connStateMutex.Lock()
- c.connState.SupportsDatagrams = c.supportsDatagrams()
- c.connStateMutex.Unlock()
return nil
}
@@ -2456,7 +2494,7 @@ func (c *Conn) sendPackets(now monotime.Time) error {
return err
}
c.logger.Debugf("sending path probe packet from %s", c.LocalAddr())
- c.logShortHeaderPacket(probe.DestConnID, probe.Ack, probe.Frames, probe.StreamFrames, probe.PacketNumber, probe.PacketNumberLen, probe.KeyPhase, protocol.ECNNon, buf.Len(), false)
+ c.logShortHeaderPacket(probe, protocol.ECNNon, buf.Len())
c.registerPackedShortHeaderPacket(probe, protocol.ECNNon, now)
tr.WriteTo(buf.Data, c.conn.RemoteAddr())
// There's (likely) more data to send. Loop around again.
@@ -2477,7 +2515,7 @@ func (c *Conn) sendPackets(now monotime.Time) error {
return err
}
ecn := c.sentPacketHandler.ECNMode(true)
- c.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false)
+ c.logShortHeaderPacket(p, ecn, buf.Len())
c.registerPackedShortHeaderPacket(p, ecn, now)
c.sendQueue.Send(buf, 0, ecn)
// There's (likely) more data to send. Loop around again.
@@ -2646,7 +2684,7 @@ func (c *Conn) maybeSendAckOnlyPacket(now monotime.Time) error {
}
return err
}
- c.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false)
+ c.logShortHeaderPacket(p, ecn, buf.Len())
c.registerPackedShortHeaderPacket(p, ecn, now)
c.sendQueue.Send(buf, 0, ecn)
return nil
@@ -2700,7 +2738,7 @@ func (c *Conn) appendOneShortHeaderPacket(buf *packetBuffer, maxSize protocol.By
return 0, err
}
size := buf.Len() - startLen
- c.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, size, false)
+ c.logShortHeaderPacket(p, ecn, size)
c.registerPackedShortHeaderPacket(p, ecn, now)
return size, nil
}
@@ -2920,7 +2958,7 @@ func (c *Conn) scheduleSending() {
// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys.
// The qlogevents.PacketType is only used for logging purposes.
-func (c *Conn) tryQueueingUndecryptablePacket(p receivedPacket, pt qlog.PacketType) {
+func (c *Conn) tryQueueingUndecryptablePacket(p receivedPacket, pt qlog.PacketType, datagramID qlog.DatagramID) {
if c.handshakeComplete {
panic("shouldn't queue undecryptable packets after handshake completion")
}
@@ -2931,8 +2969,9 @@ func (c *Conn) tryQueueingUndecryptablePacket(p receivedPacket, pt qlog.PacketTy
PacketType: pt,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
- Trigger: qlog.PacketDropDOSPrevention,
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
+ Trigger: qlog.PacketDropDOSPrevention,
})
}
c.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", p.Size())
@@ -2945,10 +2984,11 @@ func (c *Conn) tryQueueingUndecryptablePacket(p receivedPacket, pt qlog.PacketTy
PacketType: pt,
PacketNumber: protocol.InvalidPacketNumber,
},
- Raw: qlog.RawInfo{Length: int(p.Size())},
+ Raw: qlog.RawInfo{Length: int(p.Size())},
+ DatagramID: datagramID,
})
}
- c.undecryptablePackets = append(c.undecryptablePackets, p)
+ c.undecryptablePackets = append(c.undecryptablePackets, receivedPacketWithDatagramID{receivedPacket: p, datagramID: datagramID})
}
func (c *Conn) queueControlFrame(f wire.Frame) {
diff --git a/vendor/github.com/quic-go/quic-go/connection_logging.go b/vendor/github.com/quic-go/quic-go/connection_logging.go
index 0c6221d5b..c828c8be0 100644
--- a/vendor/github.com/quic-go/quic-go/connection_logging.go
+++ b/vendor/github.com/quic-go/quic-go/connection_logging.go
@@ -5,7 +5,6 @@ import (
"net/netip"
"slices"
- "github.com/quic-go/quic-go/internal/ackhandler"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/wire"
"github.com/quic-go/quic-go/qlog"
@@ -58,7 +57,7 @@ func toQlogAckFrame(f *wire.AckFrame) *qlog.AckFrame {
return ack
}
-func (c *Conn) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) {
+func (c *Conn) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN, datagramID qlog.DatagramID) {
// quic-go logging
if c.logger.Debug() {
p.header.Log(c.logger)
@@ -102,91 +101,85 @@ func (c *Conn) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) {
Length: int(p.length),
PayloadLength: int(p.header.Length),
},
- Frames: frames,
- ECN: toQlogECN(ecn),
+ DatagramID: datagramID,
+ Frames: frames,
+ ECN: toQlogECN(ecn),
})
}
}
-func (c *Conn) logShortHeaderPacket(
- destConnID protocol.ConnectionID,
- ackFrame *wire.AckFrame,
- frames []ackhandler.Frame,
- streamFrames []ackhandler.StreamFrame,
- pn protocol.PacketNumber,
- pnLen protocol.PacketNumberLen,
- kp protocol.KeyPhaseBit,
- ecn protocol.ECN,
- size protocol.ByteCount,
- isCoalesced bool,
-) {
+func (c *Conn) logShortHeaderPacket(p shortHeaderPacket, ecn protocol.ECN, size protocol.ByteCount) {
+ c.logShortHeaderPacketWithDatagramID(p, ecn, size, false, 0)
+}
+
+func (c *Conn) logShortHeaderPacketWithDatagramID(p shortHeaderPacket, ecn protocol.ECN, size protocol.ByteCount, isCoalesced bool, datagramID qlog.DatagramID) {
if c.logger.Debug() && !isCoalesced {
- c.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", pn, size, c.logID, ecn)
+ c.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", p.PacketNumber, size, c.logID, ecn)
}
// quic-go logging
if c.logger.Debug() {
- wire.LogShortHeader(c.logger, destConnID, pn, pnLen, kp)
- if ackFrame != nil {
- wire.LogFrame(c.logger, ackFrame, true)
+ wire.LogShortHeader(c.logger, p.DestConnID, p.PacketNumber, p.PacketNumberLen, p.KeyPhase)
+ if p.Ack != nil {
+ wire.LogFrame(c.logger, p.Ack, true)
}
- for _, f := range frames {
+ for _, f := range p.Frames {
wire.LogFrame(c.logger, f.Frame, true)
}
- for _, f := range streamFrames {
+ for _, f := range p.StreamFrames {
wire.LogFrame(c.logger, f.Frame, true)
}
}
// tracing
if c.qlogger != nil {
- numFrames := len(frames) + len(streamFrames)
- if ackFrame != nil {
+ numFrames := len(p.Frames) + len(p.StreamFrames)
+ if p.Ack != nil {
numFrames++
}
fs := make([]qlog.Frame, 0, numFrames)
- if ackFrame != nil {
- fs = append(fs, toQlogFrame(ackFrame))
+ if p.Ack != nil {
+ fs = append(fs, toQlogFrame(p.Ack))
}
- for _, f := range frames {
+ for _, f := range p.Frames {
fs = append(fs, toQlogFrame(f.Frame))
}
- for _, f := range streamFrames {
+ for _, f := range p.StreamFrames {
fs = append(fs, toQlogFrame(f.Frame))
}
c.qlogger.RecordEvent(qlog.PacketSent{
Header: qlog.PacketHeader{
PacketType: qlog.PacketType1RTT,
- KeyPhaseBit: kp,
- PacketNumber: pn,
+ KeyPhaseBit: p.KeyPhase,
+ PacketNumber: p.PacketNumber,
Version: c.version,
- DestConnectionID: destConnID,
+ DestConnectionID: p.DestConnID,
},
Raw: qlog.RawInfo{
Length: int(size),
- PayloadLength: int(size - wire.ShortHeaderLen(destConnID, pnLen)),
+ PayloadLength: int(size - wire.ShortHeaderLen(p.DestConnID, p.PacketNumberLen)),
},
- Frames: fs,
- ECN: toQlogECN(ecn),
+ DatagramID: datagramID,
+ Frames: fs,
+ ECN: toQlogECN(ecn),
})
}
}
func (c *Conn) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) {
+ var datagramID qlog.DatagramID
+ if c.qlogger != nil {
+ datagramID = qlog.CalculateDatagramID(packet.buffer.Data)
+ }
if c.logger.Debug() {
// There's a short period between dropping both Initial and Handshake keys and completion of the handshake,
// during which we might call PackCoalescedPacket but just pack a short header packet.
if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil {
- c.logShortHeaderPacket(
- packet.shortHdrPacket.DestConnID,
- packet.shortHdrPacket.Ack,
- packet.shortHdrPacket.Frames,
- packet.shortHdrPacket.StreamFrames,
- packet.shortHdrPacket.PacketNumber,
- packet.shortHdrPacket.PacketNumberLen,
- packet.shortHdrPacket.KeyPhase,
+ c.logShortHeaderPacketWithDatagramID(
+ *packet.shortHdrPacket,
ecn,
packet.shortHdrPacket.Length,
false,
+ datagramID,
)
return
}
@@ -197,10 +190,10 @@ func (c *Conn) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) {
}
}
for _, p := range packet.longHdrPackets {
- c.logLongHeaderPacket(p, ecn)
+ c.logLongHeaderPacket(p, ecn, datagramID)
}
if p := packet.shortHdrPacket; p != nil {
- c.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, p.Length, true)
+ c.logShortHeaderPacketWithDatagramID(*p, ecn, p.Length, true, datagramID)
}
}
diff --git a/vendor/github.com/quic-go/quic-go/frame_sorter.go b/vendor/github.com/quic-go/quic-go/frame_sorter.go
index bee0abadb..20c6d9cc0 100644
--- a/vendor/github.com/quic-go/quic-go/frame_sorter.go
+++ b/vendor/github.com/quic-go/quic-go/frame_sorter.go
@@ -235,3 +235,40 @@ func (s *frameSorter) Pop() (protocol.ByteCount, []byte, func()) {
func (s *frameSorter) HasMoreData() bool {
return len(s.queue) > 0
}
+
+var errTooLittleData = errors.New("too little data")
+
+// Peek copies len(p) consecutive bytes starting at offset into p, without removing them.
+// It is only possible to peek from an offset where a frame starts.
+//
+// If there isn't enough consecutive data available, errTooLittleData is returned.
+func (s *frameSorter) Peek(offset protocol.ByteCount, p []byte) error {
+ if len(p) == 0 {
+ return nil
+ }
+
+ // first, check if we have enough consecutive data available
+ pos := offset
+ remaining := len(p)
+ for remaining > 0 {
+ entry, ok := s.queue[pos]
+ if !ok {
+ return errTooLittleData
+ }
+ entryLen := len(entry.Data)
+ if remaining <= entryLen {
+ break // enough data available
+ }
+ remaining -= entryLen
+ pos += protocol.ByteCount(entryLen)
+ }
+
+ pos = offset
+ var copied int
+ for copied < len(p) {
+ entry := s.queue[pos] // the entry is guaranteed to exist from the check above
+ copied += copy(p[copied:], entry.Data)
+ pos += protocol.ByteCount(len(entry.Data))
+ }
+ return nil
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/body.go b/vendor/github.com/quic-go/quic-go/http3/body.go
index 356f726a8..ffc57bcee 100644
--- a/vendor/github.com/quic-go/quic-go/http3/body.go
+++ b/vendor/github.com/quic-go/quic-go/http3/body.go
@@ -9,10 +9,14 @@ import (
"github.com/quic-go/quic-go"
)
-// A Hijacker allows hijacking of the stream creating part of a quic.Conn from a http.ResponseWriter.
-// It is used by WebTransport to create WebTransport streams after a session has been established.
-type Hijacker interface {
- Connection() *Conn
+// Settingser allows waiting for and retrieving the peer's HTTP/3 settings.
+type Settingser interface {
+ // ReceivedSettings returns a channel that is closed once the peer's SETTINGS frame was received.
+ // Settings can be obtained from the Settings method after the channel was closed.
+ ReceivedSettings() <-chan struct{}
+ // Settings returns the settings received on this connection.
+ // It is only valid to call this function after the channel returned by ReceivedSettings was closed.
+ Settings() *Settings
}
var errTooMuchData = errors.New("peer sent too much data")
diff --git a/vendor/github.com/quic-go/quic-go/http3/client.go b/vendor/github.com/quic-go/quic-go/http3/client.go
index 1b8a16f35..b8c56f36c 100644
--- a/vendor/github.com/quic-go/quic-go/http3/client.go
+++ b/vendor/github.com/quic-go/quic-go/http3/client.go
@@ -6,17 +6,16 @@ import (
"fmt"
"io"
"log/slog"
- "maps"
"net/http"
"net/http/httptrace"
"net/textproto"
+ "sync"
"time"
+ "github.com/quic-go/qpack"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3/qlog"
- "github.com/quic-go/quic-go/quicvarint"
-
- "github.com/quic-go/qpack"
+ "github.com/quic-go/quic-go/qlogwriter"
)
const (
@@ -33,6 +32,8 @@ const (
defaultMaxResponseHeaderBytes = 10 * 1 << 20 // 10 MB
)
+var errGoAway = errors.New("connection in graceful shutdown")
+
type errConnUnusable struct{ e error }
func (e *errConnUnusable) Unwrap() error { return e.e }
@@ -47,11 +48,10 @@ var defaultQuicConfig = &quic.Config{
// ClientConn is an HTTP/3 client doing requests to a single remote server.
type ClientConn struct {
- conn *Conn
+ conn *quic.Conn
+ rawConn *rawConn
- // Enable support for HTTP/3 datagrams (RFC 9297).
- // If a QUICConfig is set, datagram support also needs to be enabled on the QUIC layer by setting enableDatagrams.
- enableDatagrams bool
+ decoder *qpack.Decoder
// Additional HTTP/3 settings.
// It is invalid to specify any settings defined by RFC 9114 (HTTP/3) and RFC 9297 (HTTP Datagrams).
@@ -68,10 +68,14 @@ type ClientConn struct {
// However, if the user explicitly requested gzip it is not automatically uncompressed.
disableCompression bool
- logger *slog.Logger
+ streamMx sync.Mutex
+ maxStreamID quic.StreamID // set once a GOAWAY frame is received
+ lastStreamID quic.StreamID // the highest stream ID that was opened
+
+ qlogger qlogwriter.Recorder
+ logger *slog.Logger
requestWriter *requestWriter
- decoder *qpack.Decoder
}
var _ http.RoundTripper = &ClientConn{}
@@ -80,114 +84,180 @@ func newClientConn(
conn *quic.Conn,
enableDatagrams bool,
additionalSettings map[uint64]uint64,
- streamHijacker func(FrameType, quic.ConnectionTracingID, *quic.Stream, error) (hijacked bool, err error),
- uniStreamHijacker func(StreamType, quic.ConnectionTracingID, *quic.ReceiveStream, error) (hijacked bool),
maxResponseHeaderBytes int,
disableCompression bool,
logger *slog.Logger,
) *ClientConn {
+ var qlogger qlogwriter.Recorder
+ if qlogTrace := conn.QlogTrace(); qlogTrace != nil && qlogTrace.SupportsSchemas(qlog.EventSchema) {
+ qlogger = qlogTrace.AddProducer()
+ }
c := &ClientConn{
- enableDatagrams: enableDatagrams,
+ conn: conn,
additionalSettings: additionalSettings,
disableCompression: disableCompression,
+ maxStreamID: invalidStreamID,
+ lastStreamID: invalidStreamID,
logger: logger,
+ qlogger: qlogger,
+ decoder: qpack.NewDecoder(),
}
if maxResponseHeaderBytes <= 0 {
c.maxResponseHeaderBytes = defaultMaxResponseHeaderBytes
} else {
c.maxResponseHeaderBytes = maxResponseHeaderBytes
}
- c.decoder = qpack.NewDecoder()
c.requestWriter = newRequestWriter()
- c.conn = newConnection(
- conn.Context(),
+ c.rawConn = newRawConn(
conn,
- c.enableDatagrams,
- false, // client
+ enableDatagrams,
+ c.onStreamsEmpty,
+ c.handleControlStream,
+ qlogger,
c.logger,
- 0,
)
// send the SETTINGs frame, using 0-RTT data, if possible
go func() {
- if err := c.setupConn(); err != nil {
+ _, err := c.rawConn.openControlStream(&settingsFrame{
+ Datagram: enableDatagrams,
+ Other: additionalSettings,
+ MaxFieldSectionSize: int64(c.maxResponseHeaderBytes),
+ })
+ if err != nil {
if c.logger != nil {
- c.logger.Debug("Setting up connection failed", "error", err)
+ c.logger.Debug("setting up connection failed", "error", err)
}
c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeInternalError), "")
+ return
}
}()
- if streamHijacker != nil {
- go c.handleBidirectionalStreams(streamHijacker)
- }
- go c.conn.handleUnidirectionalStreams(uniStreamHijacker)
return c
}
// OpenRequestStream opens a new request stream on the HTTP/3 connection.
func (c *ClientConn) OpenRequestStream(ctx context.Context) (*RequestStream, error) {
- return c.conn.openRequestStream(ctx, c.requestWriter, nil, c.disableCompression, c.maxResponseHeaderBytes)
+ return c.openRequestStream(ctx, c.requestWriter, nil, c.disableCompression, c.maxResponseHeaderBytes)
}
-func (c *ClientConn) setupConn() error {
- // open the control stream
- str, err := c.conn.OpenUniStream()
+func (c *ClientConn) openRequestStream(
+ ctx context.Context,
+ requestWriter *requestWriter,
+ reqDone chan<- struct{},
+ disableCompression bool,
+ maxHeaderBytes int,
+) (*RequestStream, error) {
+ c.streamMx.Lock()
+ maxStreamID := c.maxStreamID
+ var nextStreamID quic.StreamID
+ if c.lastStreamID == invalidStreamID {
+ nextStreamID = 0
+ } else {
+ nextStreamID = c.lastStreamID + 4
+ }
+ c.streamMx.Unlock()
+ // Streams with stream ID equal to or greater than the stream ID carried in the GOAWAY frame
+ // will be rejected, see section 5.2 of RFC 9114.
+ if maxStreamID != invalidStreamID && nextStreamID >= maxStreamID {
+ return nil, errGoAway
+ }
+
+ str, err := c.conn.OpenStreamSync(ctx)
if err != nil {
- return err
+ return nil, err
}
- b := make([]byte, 0, 64)
- b = quicvarint.Append(b, streamTypeControlStream)
- // send the SETTINGS frame
- b = (&settingsFrame{
- Datagram: c.enableDatagrams,
- Other: c.additionalSettings,
- MaxFieldSectionSize: int64(c.maxResponseHeaderBytes),
- }).Append(b)
- if c.conn.qlogger != nil {
- sf := qlog.SettingsFrame{
- MaxFieldSectionSize: int64(c.maxResponseHeaderBytes),
- Other: maps.Clone(c.additionalSettings),
- }
- if c.enableDatagrams {
- sf.Datagram = pointer(true)
- }
- c.conn.qlogger.RecordEvent(qlog.FrameCreated{
- StreamID: str.StreamID(),
- Raw: qlog.RawInfo{Length: len(b)},
- Frame: qlog.Frame{Frame: sf},
- })
+
+ c.streamMx.Lock()
+ // take the maximum here, as multiple OpenStreamSync calls might have returned concurrently
+ if c.lastStreamID == invalidStreamID {
+ c.lastStreamID = str.StreamID()
+ } else {
+ c.lastStreamID = max(c.lastStreamID, str.StreamID())
}
- _, err = str.Write(b)
- return err
+ // check again, in case a (or another) GOAWAY frame was received
+ maxStreamID = c.maxStreamID
+ c.streamMx.Unlock()
+
+ if maxStreamID != invalidStreamID && str.StreamID() >= maxStreamID {
+ str.CancelRead(quic.StreamErrorCode(ErrCodeRequestCanceled))
+ str.CancelWrite(quic.StreamErrorCode(ErrCodeRequestCanceled))
+ return nil, errGoAway
+ }
+
+ hstr := c.rawConn.TrackStream(str)
+ rsp := &http.Response{}
+ trace := httptrace.ContextClientTrace(ctx)
+ return newRequestStream(
+ newStream(hstr, c.rawConn, trace, func(r io.Reader, hf *headersFrame) error {
+ hdr, err := decodeTrailers(r, hf, maxHeaderBytes, c.decoder, c.qlogger, str.StreamID())
+ if err != nil {
+ return err
+ }
+ rsp.Trailer = hdr
+ return nil
+ }, c.qlogger),
+ requestWriter,
+ reqDone,
+ c.decoder,
+ disableCompression,
+ maxHeaderBytes,
+ rsp,
+ ), nil
+}
+
+func (c *ClientConn) handleUnidirectionalStream(str *quic.ReceiveStream) {
+ c.rawConn.handleUnidirectionalStream(str, false)
}
-func (c *ClientConn) handleBidirectionalStreams(streamHijacker func(FrameType, quic.ConnectionTracingID, *quic.Stream, error) (hijacked bool, err error)) {
+func (c *ClientConn) handleControlStream(str *quic.ReceiveStream, fp *frameParser) {
for {
- str, err := c.conn.conn.AcceptStream(context.Background())
+ f, err := fp.ParseNext(c.qlogger)
if err != nil {
- if c.logger != nil {
- c.logger.Debug("accepting bidirectional stream failed", "error", err)
+ var serr *quic.StreamError
+ if err == io.EOF || errors.As(err, &serr) {
+ c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeClosedCriticalStream), "")
+ return
}
+ c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameError), "")
return
}
- fp := &frameParser{
- r: str,
- closeConn: c.conn.CloseWithError,
- unknownFrameHandler: func(ft FrameType, e error) (processed bool, err error) {
- id := c.conn.Context().Value(quic.ConnectionTracingKey).(quic.ConnectionTracingID)
- return streamHijacker(ft, id, str, e)
- },
+ // GOAWAY is the only frame allowed at this point:
+ // * unexpected frames are ignored by the frame parser
+ // * we don't support any extension that might add support for more frames
+ goaway, ok := f.(*goAwayFrame)
+ if !ok {
+ c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "")
+ return
}
- go func() {
- if _, err := fp.ParseNext(c.conn.qlogger); err == errHijacked {
- return
- }
- if err != nil {
- if c.logger != nil {
- c.logger.Debug("error handling stream", "error", err)
- }
- }
- c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "received HTTP/3 frame on bidirectional stream")
- }()
+ if goaway.StreamID%4 != 0 { // client-initiated, bidirectional streams
+ c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeIDError), "")
+ return
+ }
+ c.streamMx.Lock()
+ // the server is not allowed to increase the Stream ID in subsequent GOAWAY frames
+ if c.maxStreamID != invalidStreamID && goaway.StreamID > c.maxStreamID {
+ c.streamMx.Unlock()
+ c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeIDError), "")
+ return
+ }
+ c.maxStreamID = goaway.StreamID
+ c.streamMx.Unlock()
+
+ hasActiveStreams := c.rawConn.hasActiveStreams()
+ // immediately close the connection if there are currently no active requests
+ if !hasActiveStreams {
+ c.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "")
+ return
+ }
+ }
+}
+
+func (c *ClientConn) onStreamsEmpty() {
+ c.streamMx.Lock()
+ defer c.streamMx.Unlock()
+
+ // The server is performing a graceful shutdown.
+ if c.maxStreamID != invalidStreamID {
+ c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "")
}
}
@@ -229,17 +299,17 @@ func (c *ClientConn) roundTrip(req *http.Request) (*http.Response, error) {
connCtx := c.conn.Context()
// wait for the server's SETTINGS frame to arrive
select {
- case <-c.conn.ReceivedSettings():
+ case <-c.rawConn.ReceivedSettings():
case <-connCtx.Done():
return nil, context.Cause(connCtx)
}
- if !c.conn.Settings().EnableExtendedConnect {
+ if !c.rawConn.Settings().EnableExtendedConnect {
return nil, errors.New("http3: server didn't enable Extended CONNECT")
}
}
reqDone := make(chan struct{})
- str, err := c.conn.openRequestStream(
+ str, err := c.openRequestStream(
req.Context(),
c.requestWriter,
reqDone,
@@ -276,19 +346,19 @@ func (c *ClientConn) roundTrip(req *http.Request) (*http.Response, error) {
// ReceivedSettings returns a channel that is closed once the server's HTTP/3 settings were received.
// Settings can be obtained from the Settings method after the channel was closed.
func (c *ClientConn) ReceivedSettings() <-chan struct{} {
- return c.conn.ReceivedSettings()
+ return c.rawConn.ReceivedSettings()
}
// Settings returns the HTTP/3 settings for this connection.
// It is only valid to call this function after the channel returned by ReceivedSettings was closed.
func (c *ClientConn) Settings() *Settings {
- return c.conn.Settings()
+ return c.rawConn.Settings()
}
// CloseWithError closes the connection with the given error code and message.
// It is invalid to call this function after the connection was closed.
-func (c *ClientConn) CloseWithError(code ErrCode, msg string) error {
- return c.conn.CloseWithError(quic.ApplicationErrorCode(code), msg)
+func (c *ClientConn) CloseWithError(code quic.ApplicationErrorCode, msg string) error {
+ return c.conn.CloseWithError(code, msg)
}
// Context returns a context that is cancelled when the connection is closed.
@@ -352,6 +422,7 @@ func (c *ClientConn) doRequest(req *http.Request, str *RequestStream) (*http.Res
} else {
// send the request body asynchronously
go func() {
+ defer str.Close()
contentLength := int64(-1)
// According to the documentation for http.Request.ContentLength,
// a value of 0 with a non-nil Body is also treated as unknown content length.
@@ -364,8 +435,16 @@ func (c *ClientConn) doRequest(req *http.Request, str *RequestStream) (*http.Res
if c.logger != nil {
c.logger.Debug("error writing request", "error", err)
}
+ return
+ }
+
+ if len(req.Trailer) > 0 {
+ if err := str.sendRequestTrailer(req); err != nil {
+ if c.logger != nil {
+ c.logger.Debug("error writing trailers", "error", err)
+ }
+ }
}
- str.Close()
}()
}
}
@@ -404,9 +483,23 @@ func (c *ClientConn) doRequest(req *http.Request, str *RequestStream) (*http.Res
return res, nil
}
-// Conn returns the underlying HTTP/3 connection.
-// This method is only useful for advanced use cases, such as when the application needs to
-// open streams on the HTTP/3 connection (e.g. WebTransport).
-func (c *ClientConn) Conn() *Conn {
- return c.conn
+// RawClientConn is a low-level HTTP/3 client connection.
+// It allows the application to take control of the stream accept loops,
+// giving the application the ability to handle streams originating from the server.
+type RawClientConn struct {
+ *ClientConn
+}
+
+// HandleUnidirectionalStream handles an incoming unidirectional stream.
+func (c *RawClientConn) HandleUnidirectionalStream(str *quic.ReceiveStream) {
+ c.rawConn.handleUnidirectionalStream(str, false)
+}
+
+// HandleBidirectionalStream handles an incoming bidirectional stream.
+func (c *ClientConn) HandleBidirectionalStream(str *quic.Stream) {
+ // According to RFC 9114, the server is not allowed to open bidirectional streams.
+ c.rawConn.CloseWithError(
+ quic.ApplicationErrorCode(ErrCodeStreamCreationError),
+ fmt.Sprintf("server opened bidirectional stream %d", str.StreamID()),
+ )
}
diff --git a/vendor/github.com/quic-go/quic-go/http3/conn.go b/vendor/github.com/quic-go/quic-go/http3/conn.go
index edadbc234..2fe61ffd9 100644
--- a/vendor/github.com/quic-go/quic-go/http3/conn.go
+++ b/vendor/github.com/quic-go/quic-go/http3/conn.go
@@ -6,322 +6,203 @@ import (
"fmt"
"io"
"log/slog"
+ "maps"
"net"
- "net/http"
- "net/http/httptrace"
"sync"
"sync/atomic"
- "time"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3/qlog"
"github.com/quic-go/quic-go/qlogwriter"
"github.com/quic-go/quic-go/quicvarint"
-
- "github.com/quic-go/qpack"
)
const maxQuarterStreamID = 1<<60 - 1
-var errGoAway = errors.New("connection in graceful shutdown")
-
// invalidStreamID is a stream ID that is invalid. The first valid stream ID in QUIC is 0.
const invalidStreamID = quic.StreamID(-1)
-// Conn is an HTTP/3 connection.
-// It has all methods from the quic.Conn expect for AcceptStream, AcceptUniStream,
-// SendDatagram and ReceiveDatagram.
-type Conn struct {
+// rawConn is an HTTP/3 connection.
+// It provides HTTP/3 specific functionality by wrapping a quic.Conn,
+// in particular handling of unidirectional HTTP/3 streams, SETTINGS and datagrams.
+type rawConn struct {
conn *quic.Conn
- ctx context.Context
-
- isServer bool
- logger *slog.Logger
+ logger *slog.Logger
enableDatagrams bool
- decoder *qpack.Decoder
+ streamMx sync.Mutex
+ streams map[quic.StreamID]*stateTrackingStream
+
+ rcvdControlStr atomic.Bool
+ rcvdQPACKEncoderStr atomic.Bool
+ rcvdQPACKDecoderStr atomic.Bool
+ controlStrHandler func(*quic.ReceiveStream, *frameParser) // is called *after* the SETTINGS frame was parsed
- streamMx sync.Mutex
- streams map[quic.StreamID]*stateTrackingStream
- lastStreamID quic.StreamID
- maxStreamID quic.StreamID
+ onStreamsEmpty func()
settings *Settings
receivedSettings chan struct{}
- idleTimeout time.Duration
- idleTimer *time.Timer
-
- qlogger qlogwriter.Recorder
+ qlogger qlogwriter.Recorder
+ qloggerWG sync.WaitGroup // tracks goroutines that may produce qlog events
}
-func newConnection(
- ctx context.Context,
+func newRawConn(
quicConn *quic.Conn,
enableDatagrams bool,
- isServer bool,
+ onStreamsEmpty func(),
+ controlStrHandler func(*quic.ReceiveStream, *frameParser),
+ qlogger qlogwriter.Recorder,
logger *slog.Logger,
- idleTimeout time.Duration,
-) *Conn {
- var qlogger qlogwriter.Recorder
- if qlogTrace := quicConn.QlogTrace(); qlogTrace != nil && qlogTrace.SupportsSchemas(qlog.EventSchema) {
- qlogger = qlogTrace.AddProducer()
- }
- c := &Conn{
- ctx: ctx,
- conn: quicConn,
- isServer: isServer,
- logger: logger,
- idleTimeout: idleTimeout,
- enableDatagrams: enableDatagrams,
- decoder: qpack.NewDecoder(),
- receivedSettings: make(chan struct{}),
- streams: make(map[quic.StreamID]*stateTrackingStream),
- maxStreamID: invalidStreamID,
- lastStreamID: invalidStreamID,
- qlogger: qlogger,
+) *rawConn {
+ c := &rawConn{
+ conn: quicConn,
+ logger: logger,
+ enableDatagrams: enableDatagrams,
+ receivedSettings: make(chan struct{}),
+ streams: make(map[quic.StreamID]*stateTrackingStream),
+ qlogger: qlogger,
+ onStreamsEmpty: onStreamsEmpty,
+ controlStrHandler: controlStrHandler,
}
- if idleTimeout > 0 {
- c.idleTimer = time.AfterFunc(idleTimeout, c.onIdleTimer)
+ if qlogger != nil {
+ context.AfterFunc(quicConn.Context(), c.closeQlogger)
}
return c
}
-func (c *Conn) OpenStream() (*quic.Stream, error) {
- return c.conn.OpenStream()
+func (c *rawConn) OpenUniStream() (*quic.SendStream, error) {
+ return c.conn.OpenUniStream()
}
-func (c *Conn) OpenStreamSync(ctx context.Context) (*quic.Stream, error) {
- return c.conn.OpenStreamSync(ctx)
-}
+// openControlStream opens the control stream and sends the SETTINGS frame.
+// It returns the control stream (needed by the server for sending GOAWAY later).
+func (c *rawConn) openControlStream(settings *settingsFrame) (*quic.SendStream, error) {
+ c.qloggerWG.Add(1)
+ defer c.qloggerWG.Done()
-func (c *Conn) OpenUniStream() (*quic.SendStream, error) {
- return c.conn.OpenUniStream()
+ str, err := c.conn.OpenUniStream()
+ if err != nil {
+ return nil, err
+ }
+ b := make([]byte, 0, 64)
+ b = quicvarint.Append(b, streamTypeControlStream)
+ b = settings.Append(b)
+ if c.qlogger != nil {
+ sf := qlog.SettingsFrame{
+ MaxFieldSectionSize: settings.MaxFieldSectionSize,
+ Other: maps.Clone(settings.Other),
+ }
+ if settings.Datagram {
+ sf.Datagram = pointer(true)
+ }
+ if settings.ExtendedConnect {
+ sf.ExtendedConnect = pointer(true)
+ }
+ c.qlogger.RecordEvent(qlog.FrameCreated{
+ StreamID: str.StreamID(),
+ Raw: qlog.RawInfo{Length: len(b)},
+ Frame: qlog.Frame{Frame: sf},
+ })
+ }
+ if _, err := str.Write(b); err != nil {
+ return nil, err
+ }
+ return str, nil
}
-func (c *Conn) OpenUniStreamSync(ctx context.Context) (*quic.SendStream, error) {
- return c.conn.OpenUniStreamSync(ctx)
-}
+func (c *rawConn) TrackStream(str *quic.Stream) *stateTrackingStream {
+ hstr := newStateTrackingStream(str, c, func(b []byte) error { return c.sendDatagram(str.StreamID(), b) })
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
+ c.streamMx.Lock()
+ c.streams[str.StreamID()] = hstr
+ c.qloggerWG.Add(1)
+ c.streamMx.Unlock()
+ return hstr
}
-func (c *Conn) RemoteAddr() net.Addr {
+func (c *rawConn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
-func (c *Conn) HandshakeComplete() <-chan struct{} {
- return c.conn.HandshakeComplete()
-}
-
-func (c *Conn) ConnectionState() quic.ConnectionState {
+func (c *rawConn) ConnectionState() quic.ConnectionState {
return c.conn.ConnectionState()
}
-func (c *Conn) onIdleTimer() {
- c.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "idle timeout")
-}
-
-func (c *Conn) clearStream(id quic.StreamID) {
+func (c *rawConn) clearStream(id quic.StreamID) {
c.streamMx.Lock()
defer c.streamMx.Unlock()
- delete(c.streams, id)
- if c.idleTimeout > 0 && len(c.streams) == 0 {
- c.idleTimer.Reset(c.idleTimeout)
+ if _, ok := c.streams[id]; ok {
+ delete(c.streams, id)
+ c.qloggerWG.Done()
}
- // The server is performing a graceful shutdown.
- // If no more streams are remaining, close the connection.
- if c.maxStreamID != invalidStreamID {
- if len(c.streams) == 0 {
- c.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "")
- }
+ if len(c.streams) == 0 {
+ c.onStreamsEmpty()
}
}
-func (c *Conn) openRequestStream(
- ctx context.Context,
- requestWriter *requestWriter,
- reqDone chan<- struct{},
- disableCompression bool,
- maxHeaderBytes int,
-) (*RequestStream, error) {
+func (c *rawConn) hasActiveStreams() bool {
c.streamMx.Lock()
- maxStreamID := c.maxStreamID
- var nextStreamID quic.StreamID
- if c.lastStreamID == invalidStreamID {
- nextStreamID = 0
- } else {
- nextStreamID = c.lastStreamID + 4
- }
- c.streamMx.Unlock()
- // Streams with stream ID equal to or greater than the stream ID carried in the GOAWAY frame
- // will be rejected, see section 5.2 of RFC 9114.
- if maxStreamID != invalidStreamID && nextStreamID >= maxStreamID {
- return nil, errGoAway
- }
+ defer c.streamMx.Unlock()
- str, err := c.OpenStreamSync(ctx)
- if err != nil {
- return nil, err
- }
- hstr := newStateTrackingStream(str, c, func(b []byte) error { return c.sendDatagram(str.StreamID(), b) })
- c.streamMx.Lock()
- c.streams[str.StreamID()] = hstr
- c.lastStreamID = str.StreamID()
- c.streamMx.Unlock()
- rsp := &http.Response{}
- trace := httptrace.ContextClientTrace(ctx)
- return newRequestStream(
- newStream(hstr, c, trace, func(r io.Reader, hf *headersFrame) error {
- hdr, err := c.decodeTrailers(r, str.StreamID(), hf, maxHeaderBytes)
- if err != nil {
- return err
- }
- rsp.Trailer = hdr
- return nil
- }, c.qlogger),
- requestWriter,
- reqDone,
- c.decoder,
- disableCompression,
- maxHeaderBytes,
- rsp,
- ), nil
+ return len(c.streams) > 0
}
-func (c *Conn) decodeTrailers(r io.Reader, streamID quic.StreamID, hf *headersFrame, maxHeaderBytes int) (http.Header, error) {
- if hf.Length > uint64(maxHeaderBytes) {
- maybeQlogInvalidHeadersFrame(c.qlogger, streamID, hf.Length)
- return nil, fmt.Errorf("http3: HEADERS frame too large: %d bytes (max: %d)", hf.Length, maxHeaderBytes)
- }
-
- b := make([]byte, hf.Length)
- if _, err := io.ReadFull(r, b); err != nil {
- return nil, err
- }
- decodeFn := c.decoder.Decode(b)
- var fields []qpack.HeaderField
- if c.qlogger != nil {
- fields = make([]qpack.HeaderField, 0, 16)
- }
- trailers, err := parseTrailers(decodeFn, &fields)
- if err != nil {
- maybeQlogInvalidHeadersFrame(c.qlogger, streamID, hf.Length)
- return nil, err
- }
- if c.qlogger != nil {
- qlogParsedHeadersFrame(c.qlogger, streamID, hf, fields)
- }
- return trailers, nil
+func (c *rawConn) CloseWithError(code quic.ApplicationErrorCode, msg string) error {
+ return c.conn.CloseWithError(code, msg)
}
-// only used by the server
-func (c *Conn) acceptStream(ctx context.Context) (*stateTrackingStream, error) {
- str, err := c.conn.AcceptStream(ctx)
+func (c *rawConn) handleUnidirectionalStream(str *quic.ReceiveStream, isServer bool) {
+ c.qloggerWG.Add(1)
+ defer c.qloggerWG.Done()
+
+ streamType, err := quicvarint.Read(quicvarint.NewReader(str))
if err != nil {
- return nil, err
- }
- strID := str.StreamID()
- hstr := newStateTrackingStream(str, c, func(b []byte) error { return c.sendDatagram(strID, b) })
- c.streamMx.Lock()
- c.streams[strID] = hstr
- if c.idleTimeout > 0 {
- if len(c.streams) == 1 {
- c.idleTimer.Stop()
+ if c.logger != nil {
+ c.logger.Debug("reading stream type on stream failed", "stream ID", str.StreamID(), "error", err)
}
+ return
}
- c.streamMx.Unlock()
- return hstr, nil
-}
-
-func (c *Conn) CloseWithError(code quic.ApplicationErrorCode, msg string) error {
- if c.idleTimer != nil {
- c.idleTimer.Stop()
- }
- return c.conn.CloseWithError(code, msg)
-}
-
-func (c *Conn) handleUnidirectionalStreams(hijack func(StreamType, quic.ConnectionTracingID, *quic.ReceiveStream, error) (hijacked bool)) {
- var (
- rcvdControlStr atomic.Bool
- rcvdQPACKEncoderStr atomic.Bool
- rcvdQPACKDecoderStr atomic.Bool
- )
-
- for {
- str, err := c.conn.AcceptUniStream(context.Background())
- if err != nil {
- if c.logger != nil {
- c.logger.Debug("accepting unidirectional stream failed", "error", err)
- }
- return
+ // We're only interested in the control stream here.
+ switch streamType {
+ case streamTypeControlStream:
+ case streamTypeQPACKEncoderStream:
+ if isFirst := c.rcvdQPACKEncoderStr.CompareAndSwap(false, true); !isFirst {
+ c.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate QPACK encoder stream")
}
-
- go func(str *quic.ReceiveStream) {
- streamType, err := quicvarint.Read(quicvarint.NewReader(str))
- if err != nil {
- id := c.Context().Value(quic.ConnectionTracingKey).(quic.ConnectionTracingID)
- if hijack != nil && hijack(StreamType(streamType), id, str, err) {
- return
- }
- if c.logger != nil {
- c.logger.Debug("reading stream type on stream failed", "stream ID", str.StreamID(), "error", err)
- }
- return
- }
- // We're only interested in the control stream here.
- switch streamType {
- case streamTypeControlStream:
- case streamTypeQPACKEncoderStream:
- if isFirst := rcvdQPACKEncoderStr.CompareAndSwap(false, true); !isFirst {
- c.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate QPACK encoder stream")
- }
- // Our QPACK implementation doesn't use the dynamic table yet.
- return
- case streamTypeQPACKDecoderStream:
- if isFirst := rcvdQPACKDecoderStr.CompareAndSwap(false, true); !isFirst {
- c.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate QPACK decoder stream")
- }
- // Our QPACK implementation doesn't use the dynamic table yet.
- return
- case streamTypePushStream:
- if c.isServer {
- // only the server can push
- c.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "")
- } else {
- // we never increased the Push ID, so we don't expect any push streams
- c.CloseWithError(quic.ApplicationErrorCode(ErrCodeIDError), "")
- }
- return
- default:
- if hijack != nil {
- if hijack(
- StreamType(streamType),
- c.Context().Value(quic.ConnectionTracingKey).(quic.ConnectionTracingID),
- str,
- nil,
- ) {
- return
- }
- }
- str.CancelRead(quic.StreamErrorCode(ErrCodeStreamCreationError))
- return
- }
- // Only a single control stream is allowed.
- if isFirstControlStr := rcvdControlStr.CompareAndSwap(false, true); !isFirstControlStr {
- c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate control stream")
- return
- }
- c.handleControlStream(str)
- }(str)
+ // Our QPACK implementation doesn't use the dynamic table yet.
+ return
+ case streamTypeQPACKDecoderStream:
+ if isFirst := c.rcvdQPACKDecoderStr.CompareAndSwap(false, true); !isFirst {
+ c.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate QPACK decoder stream")
+ }
+ // Our QPACK implementation doesn't use the dynamic table yet.
+ return
+ case streamTypePushStream:
+ if isServer {
+ // only the server can push
+ c.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "")
+ } else {
+ // we never increased the Push ID, so we don't expect any push streams
+ c.CloseWithError(quic.ApplicationErrorCode(ErrCodeIDError), "")
+ }
+ return
+ default:
+ str.CancelRead(quic.StreamErrorCode(ErrCodeStreamCreationError))
+ return
}
+ // Only a single control stream is allowed.
+ if isFirstControlStr := c.rcvdControlStr.CompareAndSwap(false, true); !isFirstControlStr {
+ c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeStreamCreationError), "duplicate control stream")
+ return
+ }
+ c.handleControlStream(str)
}
-func (c *Conn) handleControlStream(str *quic.ReceiveStream) {
+func (c *rawConn) handleControlStream(str *quic.ReceiveStream) {
fp := &frameParser{closeConn: c.conn.CloseWithError, r: str, streamID: str.StreamID()}
f, err := fp.ParseNext(c.qlogger)
if err != nil {
@@ -348,11 +229,13 @@ func (c *Conn) handleControlStream(str *quic.ReceiveStream) {
// If datagram support was enabled on our side as well as on the server side,
// we can expect it to have been negotiated both on the transport and on the HTTP/3 layer.
// Note: ConnectionState() will block until the handshake is complete (relevant when using 0-RTT).
- if c.enableDatagrams && !c.ConnectionState().SupportsDatagrams {
+ if c.enableDatagrams && !c.ConnectionState().SupportsDatagrams.Remote {
c.CloseWithError(quic.ApplicationErrorCode(ErrCodeSettingsError), "missing QUIC Datagram support")
return
}
+ c.qloggerWG.Add(1)
go func() {
+ defer c.qloggerWG.Done()
if err := c.receiveDatagrams(); err != nil {
if c.logger != nil {
c.logger.Debug("receiving datagrams failed", "error", err)
@@ -361,53 +244,12 @@ func (c *Conn) handleControlStream(str *quic.ReceiveStream) {
}()
}
- // we don't support server push, hence we don't expect any GOAWAY frames from the client
- if c.isServer {
- return
- }
-
- for {
- f, err := fp.ParseNext(c.qlogger)
- if err != nil {
- var serr *quic.StreamError
- if err == io.EOF || errors.As(err, &serr) {
- c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeClosedCriticalStream), "")
- return
- }
- c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameError), "")
- return
- }
- // GOAWAY is the only frame allowed at this point:
- // * unexpected frames are ignored by the frame parser
- // * we don't support any extension that might add support for more frames
- goaway, ok := f.(*goAwayFrame)
- if !ok {
- c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "")
- return
- }
- if goaway.StreamID%4 != 0 { // client-initiated, bidirectional streams
- c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeIDError), "")
- return
- }
- c.streamMx.Lock()
- if c.maxStreamID != invalidStreamID && goaway.StreamID > c.maxStreamID {
- c.streamMx.Unlock()
- c.conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeIDError), "")
- return
- }
- c.maxStreamID = goaway.StreamID
- hasActiveStreams := len(c.streams) > 0
- c.streamMx.Unlock()
-
- // immediately close the connection if there are currently no active requests
- if !hasActiveStreams {
- c.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "")
- return
- }
+ if c.controlStrHandler != nil {
+ c.controlStrHandler(str, fp)
}
}
-func (c *Conn) sendDatagram(streamID quic.StreamID, b []byte) error {
+func (c *rawConn) sendDatagram(streamID quic.StreamID, b []byte) error {
// TODO: this creates a lot of garbage and an additional copy
data := make([]byte, 0, len(b)+8)
quarterStreamID := uint64(streamID / 4)
@@ -425,7 +267,7 @@ func (c *Conn) sendDatagram(streamID quic.StreamID, b []byte) error {
return c.conn.SendDatagram(data)
}
-func (c *Conn) receiveDatagrams() error {
+func (c *rawConn) receiveDatagrams() error {
for {
b, err := c.conn.ReceiveDatagram(context.Background())
if err != nil {
@@ -462,11 +304,18 @@ func (c *Conn) receiveDatagrams() error {
// ReceivedSettings returns a channel that is closed once the peer's SETTINGS frame was received.
// Settings can be optained from the Settings method after the channel was closed.
-func (c *Conn) ReceivedSettings() <-chan struct{} { return c.receivedSettings }
+func (c *rawConn) ReceivedSettings() <-chan struct{} { return c.receivedSettings }
// Settings returns the settings received on this connection.
// It is only valid to call this function after the channel returned by ReceivedSettings was closed.
-func (c *Conn) Settings() *Settings { return c.settings }
+func (c *rawConn) Settings() *Settings { return c.settings }
-// Context returns the context of the underlying QUIC connection.
-func (c *Conn) Context() context.Context { return c.ctx }
+// closeQlogger waits for all goroutines that may produce qlog events to finish,
+// then closes the qlogger.
+func (c *rawConn) closeQlogger() {
+ if c.qlogger == nil {
+ return
+ }
+ c.qloggerWG.Wait()
+ c.qlogger.Close()
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/frames.go b/vendor/github.com/quic-go/quic-go/http3/frames.go
index 879a9f1b2..4a0a9e65a 100644
--- a/vendor/github.com/quic-go/quic-go/http3/frames.go
+++ b/vendor/github.com/quic-go/quic-go/http3/frames.go
@@ -16,11 +16,11 @@ import (
// FrameType is the frame type of a HTTP/3 frame
type FrameType uint64
-type unknownFrameHandlerFunc func(FrameType, error) (processed bool, err error)
-
type frame any
-var errHijacked = errors.New("hijacked")
+// The maximum length of an encoded HTTP/3 frame header is 16:
+// The frame has a type and length field, both QUIC varints (maximum 8 bytes in length)
+const frameHeaderLen = 16
type countingByteReader struct {
quicvarint.Reader
@@ -46,10 +46,9 @@ func (r *countingByteReader) Reset() {
}
type frameParser struct {
- r io.Reader
- streamID quic.StreamID
- closeConn func(quic.ApplicationErrorCode, string) error
- unknownFrameHandler unknownFrameHandlerFunc
+ r io.Reader
+ streamID quic.StreamID
+ closeConn func(quic.ApplicationErrorCode, string) error
}
func (p *frameParser) ParseNext(qlogger qlogwriter.Recorder) (frame, error) {
@@ -57,28 +56,8 @@ func (p *frameParser) ParseNext(qlogger qlogwriter.Recorder) (frame, error) {
for {
t, err := quicvarint.Read(r)
if err != nil {
- if p.unknownFrameHandler != nil {
- hijacked, err := p.unknownFrameHandler(0, err)
- if err != nil {
- return nil, err
- }
- if hijacked {
- return nil, errHijacked
- }
- }
return nil, err
}
- // Call the unknownFrameHandler for frames not defined in the HTTP/3 spec
- if t > 0xd && p.unknownFrameHandler != nil {
- hijacked, err := p.unknownFrameHandler(FrameType(t), nil)
- if err != nil {
- return nil, err
- }
- if hijacked {
- return nil, errHijacked
- }
- // If the unknownFrameHandler didn't process the frame, it is our responsibility to skip it.
- }
l, err := quicvarint.Read(r)
if err != nil {
return nil, err
diff --git a/vendor/github.com/quic-go/quic-go/http3/headers.go b/vendor/github.com/quic-go/quic-go/http3/headers.go
index 2e6d4a51c..111c5aeaa 100644
--- a/vendor/github.com/quic-go/quic-go/http3/headers.go
+++ b/vendor/github.com/quic-go/quic-go/http3/headers.go
@@ -1,6 +1,7 @@
package http3
import (
+ "bytes"
"errors"
"fmt"
"io"
@@ -13,6 +14,9 @@ import (
"golang.org/x/net/http/httpguts"
"github.com/quic-go/qpack"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3/qlog"
+ "github.com/quic-go/quic-go/qlogwriter"
)
type qpackError struct{ err error }
@@ -232,7 +236,7 @@ func requestFromHeaders(decodeFn qpack.DecodeFunc, sizeLimit int, headerFields *
requestURI = hdr.Path
}
- return &http.Request{
+ req := &http.Request{
Method: hdr.Method,
URL: u,
Proto: protocol,
@@ -243,7 +247,9 @@ func requestFromHeaders(decodeFn qpack.DecodeFunc, sizeLimit int, headerFields *
ContentLength: hdr.ContentLength,
Host: hdr.Authority,
RequestURI: requestURI,
- }, nil
+ }
+ req.Trailer = extractAnnouncedTrailers(req.Header)
+ return req, nil
}
// updateResponseFromHeaders sets up http.Response as an HTTP/3 response,
@@ -261,7 +267,7 @@ func updateResponseFromHeaders(rsp *http.Response, decodeFn qpack.DecodeFunc, si
rsp.Proto = "HTTP/3.0"
rsp.ProtoMajor = 3
rsp.Header = hdr.Headers
- processTrailers(rsp)
+ rsp.Trailer = extractAnnouncedTrailers(rsp.Header)
rsp.ContentLength = hdr.ContentLength
status, err := strconv.Atoi(hdr.Status)
@@ -273,26 +279,102 @@ func updateResponseFromHeaders(rsp *http.Response, decodeFn qpack.DecodeFunc, si
return nil
}
-// processTrailers initializes the rsp.Trailer map, and adds keys for every announced header value.
-// The Trailer header is removed from the http.Response.Header map.
+// extractAnnouncedTrailers extracts trailer keys from the "Trailer" header.
+// It returns a map with the announced keys set to nil values, and removes the "Trailer" header.
// It handles both duplicate as well as comma-separated values for the Trailer header.
// For example:
//
// Trailer: Trailer1, Trailer2
// Trailer: Trailer3
//
-// Will result in a http.Response.Trailer map containing the keys "Trailer1", "Trailer2", "Trailer3".
-func processTrailers(rsp *http.Response) {
- rawTrailers, ok := rsp.Header["Trailer"]
+// Will result in a map containing the keys "Trailer1", "Trailer2", "Trailer3" with nil values.
+func extractAnnouncedTrailers(header http.Header) http.Header {
+ rawTrailers, ok := header["Trailer"]
if !ok {
- return
+ return nil
}
- rsp.Trailer = make(http.Header)
+ trailers := make(http.Header)
for _, rawVal := range rawTrailers {
for _, val := range strings.Split(rawVal, ",") {
- rsp.Trailer[http.CanonicalHeaderKey(textproto.TrimString(val))] = nil
+ trailers[http.CanonicalHeaderKey(textproto.TrimString(val))] = nil
+ }
+ }
+ delete(header, "Trailer")
+ return trailers
+}
+
+// writeTrailers encodes and writes HTTP trailers as a HEADERS frame.
+// It returns true if trailers were written, false if there were no trailers to write.
+func writeTrailers(wr io.Writer, trailers http.Header, streamID quic.StreamID, qlogger qlogwriter.Recorder) (bool, error) {
+ var hasValues bool
+ for k, vals := range trailers {
+ if httpguts.ValidTrailerHeader(k) && len(vals) > 0 {
+ hasValues = true
+ break
+ }
+ }
+ if !hasValues {
+ return false, nil
+ }
+
+ var buf bytes.Buffer
+ enc := qpack.NewEncoder(&buf)
+ var headerFields []qlog.HeaderField
+ if qlogger != nil {
+ headerFields = make([]qlog.HeaderField, 0, len(trailers))
+ }
+
+ for k, vals := range trailers {
+ if len(vals) == 0 {
+ continue
+ }
+ if !httpguts.ValidTrailerHeader(k) {
+ continue
+ }
+ lowercaseKey := strings.ToLower(k)
+ for _, v := range vals {
+ if err := enc.WriteField(qpack.HeaderField{Name: lowercaseKey, Value: v}); err != nil {
+ return false, err
+ }
+ if qlogger != nil {
+ headerFields = append(headerFields, qlog.HeaderField{Name: lowercaseKey, Value: v})
+ }
}
}
- delete(rsp.Header, "Trailer")
+
+ b := make([]byte, 0, frameHeaderLen+buf.Len())
+ b = (&headersFrame{Length: uint64(buf.Len())}).Append(b)
+ b = append(b, buf.Bytes()...)
+ if qlogger != nil {
+ qlogCreatedHeadersFrame(qlogger, streamID, len(b), buf.Len(), headerFields)
+ }
+ _, err := wr.Write(b)
+ return true, err
+}
+
+func decodeTrailers(r io.Reader, hf *headersFrame, maxHeaderBytes int, decoder *qpack.Decoder, qlogger qlogwriter.Recorder, streamID quic.StreamID) (http.Header, error) {
+ if hf.Length > uint64(maxHeaderBytes) {
+ maybeQlogInvalidHeadersFrame(qlogger, streamID, hf.Length)
+ return nil, fmt.Errorf("http3: HEADERS frame too large: %d bytes (max: %d)", hf.Length, maxHeaderBytes)
+ }
+
+ b := make([]byte, hf.Length)
+ if _, err := io.ReadFull(r, b); err != nil {
+ return nil, err
+ }
+ decodeFn := decoder.Decode(b)
+ var fields []qpack.HeaderField
+ if qlogger != nil {
+ fields = make([]qpack.HeaderField, 0, 16)
+ }
+ trailers, err := parseTrailers(decodeFn, &fields)
+ if err != nil {
+ maybeQlogInvalidHeadersFrame(qlogger, streamID, hf.Length)
+ return nil, err
+ }
+ if qlogger != nil {
+ qlogParsedHeadersFrame(qlogger, streamID, hf, fields)
+ }
+ return trailers, nil
}
diff --git a/vendor/github.com/quic-go/quic-go/http3/request_writer.go b/vendor/github.com/quic-go/quic-go/http3/request_writer.go
index 9737fd2ae..4e03bd22a 100644
--- a/vendor/github.com/quic-go/quic-go/http3/request_writer.go
+++ b/vendor/github.com/quic-go/quic-go/http3/request_writer.go
@@ -40,7 +40,6 @@ func newRequestWriter() *requestWriter {
}
func (w *requestWriter) WriteRequestHeader(wr io.Writer, req *http.Request, gzip bool, streamID quic.StreamID, qlogger qlogwriter.Recorder) error {
- // TODO: figure out how to add support for trailers
buf := &bytes.Buffer{}
if err := w.writeHeaders(buf, req, gzip, streamID, qlogger); err != nil {
return err
@@ -59,7 +58,18 @@ func (w *requestWriter) writeHeaders(wr io.Writer, req *http.Request, gzip bool,
defer w.encoder.Close()
defer w.headerBuf.Reset()
- headerFields, err := w.encodeHeaders(req, gzip, "", actualContentLength(req), qlogger != nil)
+ var trailers string
+ if len(req.Trailer) > 0 {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ if httpguts.ValidTrailerHeader(k) {
+ keys = append(keys, k)
+ }
+ }
+ trailers = strings.Join(keys, ", ")
+ }
+
+ headerFields, err := w.encodeHeaders(req, gzip, trailers, actualContentLength(req), qlogger != nil)
if err != nil {
return err
}
@@ -302,3 +312,10 @@ func shouldSendReqContentLength(method string, contentLength int64) bool {
return false
}
}
+
+// WriteRequestTrailer writes HTTP trailers to the stream.
+// It should be called after the request body has been fully written.
+func (w *requestWriter) WriteRequestTrailer(wr io.Writer, req *http.Request, streamID quic.StreamID, qlogger qlogwriter.Recorder) error {
+ _, err := writeTrailers(wr, req.Trailer, streamID, qlogger)
+ return err
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/response_writer.go b/vendor/github.com/quic-go/quic-go/http3/response_writer.go
index ed22ca246..f80de236b 100644
--- a/vendor/github.com/quic-go/quic-go/http3/response_writer.go
+++ b/vendor/github.com/quic-go/quic-go/http3/response_writer.go
@@ -22,16 +22,12 @@ type HTTPStreamer interface {
HTTPStream() *Stream
}
-// The maximum length of an encoded HTTP/3 frame header is 16:
-// The frame has a type and length field, both QUIC varints (maximum 8 bytes in length)
-const frameHeaderLen = 16
-
const maxSmallResponseSize = 4096
type responseWriter struct {
str *Stream
- conn *Conn
+ conn *rawConn
header http.Header
trailers map[string]struct{}
buf []byte
@@ -56,7 +52,7 @@ type responseWriter struct {
var (
_ http.ResponseWriter = &responseWriter{}
_ http.Flusher = &responseWriter{}
- _ Hijacker = &responseWriter{}
+ _ Settingser = &responseWriter{}
_ HTTPStreamer = &responseWriter{}
// make sure that we implement (some of the) methods used by the http.ResponseController
_ interface {
@@ -67,7 +63,7 @@ var (
} = &responseWriter{}
)
-func newResponseWriter(str *Stream, conn *Conn, isHead bool, logger *slog.Logger) *responseWriter {
+func newResponseWriter(str *Stream, conn *rawConn, isHead bool, logger *slog.Logger) *responseWriter {
return &responseWriter{
str: str,
conn: conn,
@@ -304,59 +300,31 @@ func (w *responseWriter) declareTrailer(k string) {
w.trailers[k] = struct{}{}
}
-// hasNonEmptyTrailers checks to see if there are any trailers with an actual
-// value set. This is possible by adding trailers to the "Trailers" header
-// but never actually setting those names as trailers in the course of handling
-// the request. In that case, this check may save us some allocations.
-func (w *responseWriter) hasNonEmptyTrailers() bool {
- for trailer := range w.trailers {
- if _, ok := w.header[trailer]; ok {
- return true
- }
- }
- return false
-}
-
// writeTrailers will write trailers to the stream if there are any.
func (w *responseWriter) writeTrailers() error {
// promote headers added via "Trailer:" convention as trailers, these can be added after
// streaming the status/headers have been written.
for k := range w.header {
- // Handle "Trailer:" prefix
if strings.HasPrefix(k, http.TrailerPrefix) {
w.declareTrailer(k)
}
}
- if !w.hasNonEmptyTrailers() {
+ if len(w.trailers) == 0 {
return nil
}
- var b bytes.Buffer
- var headerFields []qlog.HeaderField
- enc := qpack.NewEncoder(&b)
+ trailers := make(http.Header, len(w.trailers))
for trailer := range w.trailers {
- trailerName := strings.ToLower(strings.TrimPrefix(trailer, http.TrailerPrefix))
if vals, ok := w.header[trailer]; ok {
- for _, val := range vals {
- if err := enc.WriteField(qpack.HeaderField{Name: trailerName, Value: val}); err != nil {
- return err
- }
- if w.str.qlogger != nil {
- headerFields = append(headerFields, qlog.HeaderField{Name: trailerName, Value: val})
- }
- }
+ trailers[strings.TrimPrefix(trailer, http.TrailerPrefix)] = vals
}
}
- buf := make([]byte, 0, frameHeaderLen+b.Len())
- buf = (&headersFrame{Length: uint64(b.Len())}).Append(buf)
- buf = append(buf, b.Bytes()...)
- if w.str.qlogger != nil {
- qlogCreatedHeadersFrame(w.str.qlogger, w.str.StreamID(), len(buf), b.Len(), headerFields)
+ written, err := writeTrailers(w.str.datagramStream, trailers, w.str.StreamID(), w.str.qlogger)
+ if written {
+ w.trailerWritten = true
}
- _, err := w.str.writeUnframed(buf)
- w.trailerWritten = true
return err
}
@@ -368,8 +336,12 @@ func (w *responseWriter) HTTPStream() *Stream {
func (w *responseWriter) wasStreamHijacked() bool { return w.hijacked }
-func (w *responseWriter) Connection() *Conn {
- return w.conn
+func (w *responseWriter) ReceivedSettings() <-chan struct{} {
+ return w.conn.ReceivedSettings()
+}
+
+func (w *responseWriter) Settings() *Settings {
+ return w.conn.Settings()
}
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
diff --git a/vendor/github.com/quic-go/quic-go/http3/server.go b/vendor/github.com/quic-go/quic-go/http3/server.go
index 87720e43a..9f4d8af80 100644
--- a/vendor/github.com/quic-go/quic-go/http3/server.go
+++ b/vendor/github.com/quic-go/quic-go/http3/server.go
@@ -7,12 +7,9 @@ import (
"fmt"
"io"
"log/slog"
- "maps"
"net"
"net/http"
- "runtime"
"slices"
- "strconv"
"strings"
"sync"
"sync/atomic"
@@ -21,9 +18,6 @@ import (
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3/qlog"
"github.com/quic-go/quic-go/qlogwriter"
- "github.com/quic-go/quic-go/quicvarint"
-
- "github.com/quic-go/qpack"
)
// NextProtoH3 is the ALPN protocol negotiated during the TLS handshake, for QUIC v1 and v2.
@@ -151,20 +145,6 @@ type Server struct {
// It is invalid to specify any settings defined by RFC 9114 (HTTP/3) and RFC 9297 (HTTP Datagrams).
AdditionalSettings map[uint64]uint64
- // StreamHijacker, when set, is called for the first unknown frame parsed on a bidirectional stream.
- // It is called right after parsing the frame type.
- // If parsing the frame type fails, the error is passed to the callback.
- // In that case, the frame type will not be set.
- // Callers can either ignore the frame and return control of the stream back to HTTP/3
- // (by returning hijacked false).
- // Alternatively, callers can take over the QUIC stream (by returning hijacked true).
- StreamHijacker func(FrameType, quic.ConnectionTracingID, *quic.Stream, error) (hijacked bool, err error)
-
- // UniStreamHijacker, when set, is called for unknown unidirectional stream of unknown stream type.
- // If parsing the stream type fails, the error is passed to the callback.
- // In that case, the stream type will not be set.
- UniStreamHijacker func(StreamType, quic.ConnectionTracingID, *quic.ReceiveStream, error) (hijacked bool)
-
// IdleTimeout specifies how long until idle clients connection should be
// closed. Idle refers only to the HTTP/3 layer, activity at the QUIC layer
// like PING frames are not considered.
@@ -437,45 +417,19 @@ func (s *Server) removeListener(l *QUICListener) {
s.generateAltSvcHeader()
}
-// handleConn handles the HTTP/3 exchange on a QUIC connection.
-// It blocks until all HTTP handlers for all streams have returned.
-func (s *Server) handleConn(conn *quic.Conn) error {
+func (s *Server) NewRawServerConn(conn *quic.Conn) (*RawServerConn, error) {
+ hconn, _, _, err := s.newRawServerConn(conn)
+ if err != nil {
+ return nil, err
+ }
+ return hconn, nil
+}
+
+func (s *Server) newRawServerConn(conn *quic.Conn) (*RawServerConn, *quic.SendStream, qlogwriter.Recorder, error) {
var qlogger qlogwriter.Recorder
if qlogTrace := conn.QlogTrace(); qlogTrace != nil && qlogTrace.SupportsSchemas(qlog.EventSchema) {
qlogger = qlogTrace.AddProducer()
}
-
- // open the control stream and send a SETTINGS frame, it's also used to send a GOAWAY frame later
- // when the server is gracefully closed
- ctrlStr, err := conn.OpenUniStream()
- if err != nil {
- return fmt.Errorf("opening the control stream failed: %w", err)
- }
- b := make([]byte, 0, 64)
- b = quicvarint.Append(b, streamTypeControlStream) // stream type
- b = (&settingsFrame{
- MaxFieldSectionSize: int64(s.maxHeaderBytes()),
- Datagram: s.EnableDatagrams,
- ExtendedConnect: true,
- Other: s.AdditionalSettings,
- }).Append(b)
- if qlogger != nil {
- sf := qlog.SettingsFrame{
- MaxFieldSectionSize: int64(s.maxHeaderBytes()),
- ExtendedConnect: pointer(true),
- Other: maps.Clone(s.AdditionalSettings),
- }
- if s.EnableDatagrams {
- sf.Datagram = pointer(true)
- }
- qlogger.RecordEvent(qlog.FrameCreated{
- StreamID: ctrlStr.StreamID(),
- Raw: qlog.RawInfo{Length: len(b)},
- Frame: qlog.Frame{Frame: sf},
- })
- }
- ctrlStr.Write(b)
-
connCtx := conn.Context()
connCtx = context.WithValue(connCtx, ServerContextKey, s)
connCtx = context.WithValue(connCtx, http.LocalAddrContextKey, conn.LocalAddr())
@@ -486,19 +440,54 @@ func (s *Server) handleConn(conn *quic.Conn) error {
panic("http3: ConnContext returned nil")
}
}
-
- hconn := newConnection(
- connCtx,
+ hconn := newRawServerConn(
conn,
s.EnableDatagrams,
- true, // server
- s.Logger,
s.IdleTimeout,
+ qlogger,
+ s.Logger,
+ connCtx,
+ s.Handler,
+ s.maxHeaderBytes(),
)
- go hconn.handleUnidirectionalStreams(s.UniStreamHijacker)
- var nextStreamID quic.StreamID
+ // open the control stream and send a SETTINGS frame, it's also used to send a GOAWAY frame later
+ // when the server is gracefully closed
+ ctrlStr, err := hconn.openControlStream(&settingsFrame{
+ MaxFieldSectionSize: int64(s.maxHeaderBytes()),
+ Datagram: s.EnableDatagrams,
+ ExtendedConnect: true,
+ Other: s.AdditionalSettings,
+ })
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("opening the control stream failed: %w", err)
+ }
+ return hconn, ctrlStr, qlogger, nil
+}
+
+// handleConn handles the HTTP/3 exchange on a QUIC connection.
+// It blocks until all HTTP handlers for all streams have returned.
+func (s *Server) handleConn(conn *quic.Conn) error {
+ hconn, ctrlStr, qlogger, err := s.newRawServerConn(conn)
+ if err != nil {
+ return err
+ }
+
var wg sync.WaitGroup
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ str, err := conn.AcceptUniStream(context.Background())
+ if err != nil {
+ return
+ }
+ go hconn.HandleUnidirectionalStream(str)
+ }
+ }()
+
+ var nextStreamID quic.StreamID
var handleErr error
var inGracefulShutdown bool
// Process all requests immediately.
@@ -509,10 +498,10 @@ func (s *Server) handleConn(conn *quic.Conn) error {
// * before graceful shutdown: s.graceCtx
// * after graceful shutdown: s.closeCtx
// This allows us to keep accepting (and resetting) streams after graceful shutdown has started.
- str, err := hconn.acceptStream(ctx)
+ str, err := conn.AcceptStream(ctx)
if err != nil {
// the underlying connection was closed (by either side)
- if hconn.Context().Err() != nil {
+ if conn.Context().Err() != nil {
var appErr *quic.ApplicationError
if !errors.As(err, &appErr) || appErr.ErrorCode != quic.ApplicationErrorCode(ErrCodeNoError) {
handleErr = fmt.Errorf("accepting stream failed: %w", err)
@@ -521,7 +510,7 @@ func (s *Server) handleConn(conn *quic.Conn) error {
}
// server (not gracefully) closed, close the connection immediately
if s.closeCtx.Err() != nil {
- conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "")
+ hconn.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "")
handleErr = http.ErrServerClosed
break
}
@@ -562,10 +551,10 @@ func (s *Server) handleConn(conn *quic.Conn) error {
nextStreamID = str.StreamID() + 4
wg.Add(1)
go func() {
- // handleRequest will return once the request has been handled,
- // or the underlying connection is closed
+ // HandleRequestStream will return once the request has been handled,
+ // or the underlying connection is closed.
defer wg.Done()
- s.handleRequest(hconn, str, hconn.decoder, qlogger)
+ hconn.HandleRequestStream(str)
}()
}
wg.Wait()
@@ -579,164 +568,6 @@ func (s *Server) maxHeaderBytes() int {
return s.MaxHeaderBytes
}
-func (s *Server) handleRequest(
- conn *Conn,
- str *stateTrackingStream,
- decoder *qpack.Decoder,
- qlogger qlogwriter.Recorder,
-) {
- var ufh unknownFrameHandlerFunc
- if s.StreamHijacker != nil {
- ufh = func(ft FrameType, e error) (processed bool, err error) {
- return s.StreamHijacker(
- ft,
- conn.Context().Value(quic.ConnectionTracingKey).(quic.ConnectionTracingID),
- str.QUICStream(),
- e,
- )
- }
- }
- fp := &frameParser{closeConn: conn.CloseWithError, r: str, unknownFrameHandler: ufh}
- frame, err := fp.ParseNext(qlogger)
- if err != nil {
- if !errors.Is(err, errHijacked) {
- str.CancelRead(quic.StreamErrorCode(ErrCodeRequestIncomplete))
- str.CancelWrite(quic.StreamErrorCode(ErrCodeRequestIncomplete))
- }
- return
- }
- hf, ok := frame.(*headersFrame)
- if !ok {
- conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "expected first frame to be a HEADERS frame")
- return
- }
- if hf.Length > uint64(s.maxHeaderBytes()) {
- maybeQlogInvalidHeadersFrame(qlogger, str.StreamID(), hf.Length)
- // stop the client from sending more data
- str.CancelRead(quic.StreamErrorCode(ErrCodeExcessiveLoad))
- // send a 431 Response (Request Header Fields Too Large)
- s.rejectWithHeaderFieldsTooLarge(str, conn, qlogger)
- return
- }
- headerBlock := make([]byte, hf.Length)
- if _, err := io.ReadFull(str, headerBlock); err != nil {
- maybeQlogInvalidHeadersFrame(qlogger, str.StreamID(), hf.Length)
- str.CancelRead(quic.StreamErrorCode(ErrCodeRequestIncomplete))
- str.CancelWrite(quic.StreamErrorCode(ErrCodeRequestIncomplete))
- return
- }
- decodeFn := decoder.Decode(headerBlock)
- var hfs []qpack.HeaderField
- if qlogger != nil {
- hfs = make([]qpack.HeaderField, 0, 16)
- }
- req, err := requestFromHeaders(decodeFn, s.maxHeaderBytes(), &hfs)
- if qlogger != nil {
- qlogParsedHeadersFrame(qlogger, str.StreamID(), hf, hfs)
- }
- if err != nil {
- if errors.Is(err, errHeaderTooLarge) {
- // stop the client from sending more data
- str.CancelRead(quic.StreamErrorCode(ErrCodeExcessiveLoad))
- // send a 431 Response (Request Header Fields Too Large)
- s.rejectWithHeaderFieldsTooLarge(str, conn, qlogger)
- return
- }
-
- errCode := ErrCodeMessageError
- var qpackErr *qpackError
- if errors.As(err, &qpackErr) {
- errCode = ErrCodeQPACKDecompressionFailed
- }
- str.CancelRead(quic.StreamErrorCode(errCode))
- str.CancelWrite(quic.StreamErrorCode(errCode))
- return
- }
-
- connState := conn.ConnectionState().TLS
- req.TLS = &connState
- req.RemoteAddr = conn.RemoteAddr().String()
-
- // Check that the client doesn't send more data in DATA frames than indicated by the Content-Length header (if set).
- // See section 4.1.2 of RFC 9114.
- contentLength := int64(-1)
- if _, ok := req.Header["Content-Length"]; ok && req.ContentLength >= 0 {
- contentLength = req.ContentLength
- }
- hstr := newStream(str, conn, nil, nil, qlogger)
- body := newRequestBody(hstr, contentLength, conn.Context(), conn.ReceivedSettings(), conn.Settings)
- req.Body = body
-
- if s.Logger != nil {
- s.Logger.Debug("handling request", "method", req.Method, "host", req.Host, "uri", req.RequestURI)
- }
-
- ctx, cancel := context.WithCancel(conn.Context())
- req = req.WithContext(ctx)
- context.AfterFunc(str.Context(), cancel)
-
- r := newResponseWriter(hstr, conn, req.Method == http.MethodHead, s.Logger)
- handler := s.Handler
- if handler == nil {
- handler = http.DefaultServeMux
- }
-
- // It's the client's responsibility to decide which requests are eligible for 0-RTT.
- var panicked bool
- func() {
- defer func() {
- if p := recover(); p != nil {
- panicked = true
- if p == http.ErrAbortHandler {
- return
- }
- // Copied from net/http/server.go
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- logger := s.Logger
- if logger == nil {
- logger = slog.Default()
- }
- logger.Error("http3: panic serving", "arg", p, "trace", string(buf))
- }
- }()
- handler.ServeHTTP(r, req)
- }()
-
- if r.wasStreamHijacked() {
- return
- }
-
- // abort the stream when there is a panic
- if panicked {
- str.CancelRead(quic.StreamErrorCode(ErrCodeInternalError))
- str.CancelWrite(quic.StreamErrorCode(ErrCodeInternalError))
- return
- }
-
- // response not written to the client yet, set Content-Length
- if !r.headerWritten {
- if _, haveCL := r.header["Content-Length"]; !haveCL {
- r.header.Set("Content-Length", strconv.FormatInt(r.numWritten, 10))
- }
- }
- r.Flush()
- r.flushTrailers()
-
- // If the EOF was read by the handler, CancelRead() is a no-op.
- str.CancelRead(quic.StreamErrorCode(ErrCodeNoError))
- str.Close()
-}
-
-func (s *Server) rejectWithHeaderFieldsTooLarge(str *stateTrackingStream, conn *Conn, qlogger qlogwriter.Recorder) {
- hstr := newStream(str, conn, nil, nil, qlogger)
- defer hstr.Close()
- r := newResponseWriter(hstr, conn, false, s.Logger)
- r.WriteHeader(http.StatusRequestHeaderFieldsTooLarge)
- r.Flush()
-}
-
// Close the server immediately, aborting requests and sending CONNECTION_CLOSE frames to connected clients.
// Close in combination with ListenAndServe() (instead of Serve()) may race if it is called before a UDP socket is established.
// It is the caller's responsibility to close any connection passed to ServeQUICConn.
diff --git a/vendor/github.com/quic-go/quic-go/http3/server_conn.go b/vendor/github.com/quic-go/quic-go/http3/server_conn.go
new file mode 100644
index 000000000..e06eb64fc
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/server_conn.go
@@ -0,0 +1,261 @@
+package http3
+
+import (
+ "context"
+ "errors"
+ "io"
+ "log/slog"
+ "net/http"
+ "runtime"
+ "strconv"
+ "time"
+
+ "github.com/quic-go/qpack"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/qlogwriter"
+)
+
+// RawServerConn is an HTTP/3 server connection.
+// It can be used for advanced use cases where the application wants to manage the QUIC connection lifecycle.
+type RawServerConn struct {
+ rawConn rawConn
+
+ idleTimeout time.Duration
+ idleTimer *time.Timer
+
+ serverContext context.Context
+ requestHandler http.Handler
+ maxHeaderBytes int
+
+ decoder *qpack.Decoder
+
+ qlogger qlogwriter.Recorder
+ logger *slog.Logger
+}
+
+func newRawServerConn(
+ conn *quic.Conn,
+ enableDatagrams bool,
+ idleTimeout time.Duration,
+ qlogger qlogwriter.Recorder,
+ logger *slog.Logger,
+ serverContext context.Context,
+ requestHandler http.Handler,
+ maxHeaderBytes int,
+) *RawServerConn {
+ c := &RawServerConn{
+ idleTimeout: idleTimeout,
+ serverContext: serverContext,
+ requestHandler: requestHandler,
+ maxHeaderBytes: maxHeaderBytes,
+ decoder: qpack.NewDecoder(),
+ qlogger: qlogger,
+ logger: logger,
+ }
+ c.rawConn = *newRawConn(conn, enableDatagrams, c.onStreamsEmpty, nil, qlogger, logger)
+ if idleTimeout > 0 {
+ c.idleTimer = time.AfterFunc(idleTimeout, c.onIdleTimer)
+ }
+ return c
+}
+
+func (c *RawServerConn) onStreamsEmpty() {
+ if c.idleTimeout > 0 {
+ c.idleTimer.Reset(c.idleTimeout)
+ }
+}
+
+func (c *RawServerConn) onIdleTimer() {
+ c.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "idle timeout")
+}
+
+// CloseWithError closes the connection with the given error code and message.
+func (c *RawServerConn) CloseWithError(code quic.ApplicationErrorCode, msg string) error {
+ if c.idleTimer != nil {
+ c.idleTimer.Stop()
+ }
+ return c.rawConn.CloseWithError(code, msg)
+}
+
+// HandleRequestStream handles an HTTP/3 request on a bidirectional request stream.
+// The stream can either be obtained by calling AcceptStream on the underlying QUIC connection,
+// or (internally) by using the server's stream accept loop.
+func (c *RawServerConn) HandleRequestStream(str *quic.Stream) {
+ hstr := c.rawConn.TrackStream(str)
+ c.handleRequestStream(hstr)
+}
+
+func (c *RawServerConn) requestMaxHeaderBytes() int {
+ if c.maxHeaderBytes <= 0 {
+ return http.DefaultMaxHeaderBytes
+ }
+ return c.maxHeaderBytes
+}
+
+func (c *RawServerConn) openControlStream(settings *settingsFrame) (*quic.SendStream, error) {
+ return c.rawConn.openControlStream(settings)
+}
+
+func (c *RawServerConn) handleRequestStream(str *stateTrackingStream) {
+ if c.idleTimeout > 0 {
+ // This only applies if the stream is the first active stream,
+ // but it's ok to stop a stopped timer.
+ c.idleTimer.Stop()
+ }
+
+ conn := &c.rawConn
+ qlogger := c.qlogger
+ decoder := c.decoder
+ connCtx := c.serverContext
+ maxHeaderBytes := c.requestMaxHeaderBytes()
+
+ fp := &frameParser{closeConn: conn.CloseWithError, r: str, streamID: str.StreamID()}
+ frame, err := fp.ParseNext(qlogger)
+ if err != nil {
+ str.CancelRead(quic.StreamErrorCode(ErrCodeRequestIncomplete))
+ str.CancelWrite(quic.StreamErrorCode(ErrCodeRequestIncomplete))
+ return
+ }
+ hf, ok := frame.(*headersFrame)
+ if !ok {
+ conn.CloseWithError(quic.ApplicationErrorCode(ErrCodeFrameUnexpected), "expected first frame to be a HEADERS frame")
+ return
+ }
+ if hf.Length > uint64(maxHeaderBytes) {
+ maybeQlogInvalidHeadersFrame(qlogger, str.StreamID(), hf.Length)
+ // stop the client from sending more data
+ str.CancelRead(quic.StreamErrorCode(ErrCodeExcessiveLoad))
+ // send a 431 Response (Request Header Fields Too Large)
+ c.rejectWithHeaderFieldsTooLarge(str)
+ return
+ }
+ headerBlock := make([]byte, hf.Length)
+ if _, err := io.ReadFull(str, headerBlock); err != nil {
+ maybeQlogInvalidHeadersFrame(qlogger, str.StreamID(), hf.Length)
+ str.CancelRead(quic.StreamErrorCode(ErrCodeRequestIncomplete))
+ str.CancelWrite(quic.StreamErrorCode(ErrCodeRequestIncomplete))
+ return
+ }
+ decodeFn := decoder.Decode(headerBlock)
+ var hfs []qpack.HeaderField
+ if qlogger != nil {
+ hfs = make([]qpack.HeaderField, 0, 16)
+ }
+ req, err := requestFromHeaders(decodeFn, maxHeaderBytes, &hfs)
+ if qlogger != nil {
+ qlogParsedHeadersFrame(qlogger, str.StreamID(), hf, hfs)
+ }
+ if err != nil {
+ if errors.Is(err, errHeaderTooLarge) {
+ // stop the client from sending more data
+ str.CancelRead(quic.StreamErrorCode(ErrCodeExcessiveLoad))
+ // send a 431 Response (Request Header Fields Too Large)
+ c.rejectWithHeaderFieldsTooLarge(str)
+ return
+ }
+
+ errCode := ErrCodeMessageError
+ var qpackErr *qpackError
+ if errors.As(err, &qpackErr) {
+ errCode = ErrCodeQPACKDecompressionFailed
+ }
+ str.CancelRead(quic.StreamErrorCode(errCode))
+ str.CancelWrite(quic.StreamErrorCode(errCode))
+ return
+ }
+
+ connState := conn.ConnectionState().TLS
+ req.TLS = &connState
+ req.RemoteAddr = conn.RemoteAddr().String()
+
+ // Check that the client doesn't send more data in DATA frames than indicated by the Content-Length header (if set).
+ // See section 4.1.2 of RFC 9114.
+ contentLength := int64(-1)
+ if _, ok := req.Header["Content-Length"]; ok && req.ContentLength >= 0 {
+ contentLength = req.ContentLength
+ }
+ hstr := newStream(str, conn, nil, func(r io.Reader, hf *headersFrame) error {
+ trailers, err := decodeTrailers(r, hf, maxHeaderBytes, decoder, qlogger, str.StreamID())
+ if err != nil {
+ return err
+ }
+ req.Trailer = trailers
+ return nil
+ }, qlogger)
+ body := newRequestBody(hstr, contentLength, connCtx, conn.ReceivedSettings(), conn.Settings)
+ req.Body = body
+
+ if c.logger != nil {
+ c.logger.Debug("handling request", "method", req.Method, "host", req.Host, "uri", req.RequestURI)
+ }
+
+ ctx, cancel := context.WithCancel(connCtx)
+ req = req.WithContext(ctx)
+ context.AfterFunc(str.Context(), cancel)
+
+ r := newResponseWriter(hstr, conn, req.Method == http.MethodHead, c.logger)
+ handler := c.requestHandler
+ if handler == nil {
+ handler = http.DefaultServeMux
+ }
+
+ // It's the client's responsibility to decide which requests are eligible for 0-RTT.
+ var panicked bool
+ func() {
+ defer func() {
+ if p := recover(); p != nil {
+ panicked = true
+ if p == http.ErrAbortHandler {
+ return
+ }
+ // Copied from net/http/server.go
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ logger := c.logger
+ if logger == nil {
+ logger = slog.Default()
+ }
+ logger.Error("http3: panic serving", "arg", p, "trace", string(buf))
+ }
+ }()
+ handler.ServeHTTP(r, req)
+ }()
+
+ if r.wasStreamHijacked() {
+ return
+ }
+
+ // abort the stream when there is a panic
+ if panicked {
+ str.CancelRead(quic.StreamErrorCode(ErrCodeInternalError))
+ str.CancelWrite(quic.StreamErrorCode(ErrCodeInternalError))
+ return
+ }
+
+ // response not written to the client yet, set Content-Length
+ if !r.headerWritten {
+ if _, haveCL := r.header["Content-Length"]; !haveCL {
+ r.header.Set("Content-Length", strconv.FormatInt(r.numWritten, 10))
+ }
+ }
+ r.Flush()
+ r.flushTrailers()
+
+ // If the EOF was read by the handler, CancelRead() is a no-op.
+ str.CancelRead(quic.StreamErrorCode(ErrCodeNoError))
+ str.Close()
+}
+
+func (c *RawServerConn) rejectWithHeaderFieldsTooLarge(str *stateTrackingStream) {
+ hstr := newStream(str, &c.rawConn, nil, nil, c.qlogger)
+ defer hstr.Close()
+ r := newResponseWriter(hstr, &c.rawConn, false, c.logger)
+ r.WriteHeader(http.StatusRequestHeaderFieldsTooLarge)
+ r.Flush()
+}
+
+// HandleUnidirectionalStream handles an incoming unidirectional stream.
+func (c *RawServerConn) HandleUnidirectionalStream(str *quic.ReceiveStream) {
+ c.rawConn.handleUnidirectionalStream(str, true)
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/stream.go b/vendor/github.com/quic-go/quic-go/http3/stream.go
index 12204cac3..0d6d302a6 100644
--- a/vendor/github.com/quic-go/quic-go/http3/stream.go
+++ b/vendor/github.com/quic-go/quic-go/http3/stream.go
@@ -36,7 +36,7 @@ type datagramStream interface {
// When writing to and reading from the stream, data is framed in HTTP/3 DATA frames.
type Stream struct {
datagramStream
- conn *Conn
+ conn *rawConn
frameParser *frameParser
buf []byte // used as a temporary buffer when writing the HTTP/3 frame headers
@@ -51,7 +51,7 @@ type Stream struct {
func newStream(
str datagramStream,
- conn *Conn,
+ conn *rawConn,
trace *httptrace.ClientTrace,
parseTrailer func(io.Reader, *headersFrame) error,
qlogger qlogwriter.Recorder,
@@ -86,9 +86,6 @@ func (s *Stream) Read(b []byte) (int, error) {
s.bytesRemainingInFrame = f.Length
break parseLoop
case *headersFrame:
- if s.conn.isServer {
- continue
- }
if s.parsedTrailer {
maybeQlogInvalidHeadersFrame(s.qlogger, s.StreamID(), f.Length)
return 0, errors.New("additional HEADERS frame received after trailers")
@@ -308,6 +305,12 @@ func (s *RequestStream) sendRequestHeader(req *http.Request) error {
return s.requestWriter.WriteRequestHeader(s.str.datagramStream, req, s.requestedGzip, s.str.StreamID(), s.str.qlogger)
}
+// sendRequestTrailer sends request trailers to the stream.
+// It should be called after the request body has been fully written.
+func (s *RequestStream) sendRequestTrailer(req *http.Request) error {
+ return s.requestWriter.WriteRequestTrailer(s.str.datagramStream, req, s.str.StreamID(), s.str.qlogger)
+}
+
// ReadResponse reads the HTTP response from the stream.
//
// It must be called after sending the request (using SendRequestHeader).
@@ -316,7 +319,7 @@ func (s *RequestStream) sendRequestHeader(req *http.Request) error {
// It is invalid to call it after Read has been called.
func (s *RequestStream) ReadResponse() (*http.Response, error) {
if !s.sentRequest {
- return nil, errors.New("http3: invalid duplicate use of RequestStream.ReadResponse before SendRequestHeader")
+ return nil, errors.New("http3: invalid use of RequestStream.ReadResponse before SendRequestHeader")
}
frame, err := s.str.frameParser.ParseNext(s.str.qlogger)
if err != nil {
diff --git a/vendor/github.com/quic-go/quic-go/http3/transport.go b/vendor/github.com/quic-go/quic-go/http3/transport.go
index 859977945..96252578e 100644
--- a/vendor/github.com/quic-go/quic-go/http3/transport.go
+++ b/vendor/github.com/quic-go/quic-go/http3/transport.go
@@ -40,6 +40,7 @@ type RoundTripOpt struct {
type clientConn interface {
OpenRequestStream(context.Context) (*RequestStream, error)
RoundTrip(*http.Request) (*http.Response, error)
+ handleUnidirectionalStream(*quic.ReceiveStream)
}
type roundTripperWithCount struct {
@@ -97,9 +98,6 @@ type Transport struct {
// However, if the user explicitly requested gzip it is not automatically uncompressed.
DisableCompression bool
- StreamHijacker func(FrameType, quic.ConnectionTracingID, *quic.Stream, error) (hijacked bool, err error)
- UniStreamHijacker func(StreamType, quic.ConnectionTracingID, *quic.ReceiveStream, error) (hijacked bool)
-
Logger *slog.Logger
mutex sync.Mutex
@@ -133,8 +131,6 @@ func (t *Transport) init() error {
conn,
t.EnableDatagrams,
t.AdditionalSettings,
- t.StreamHijacker,
- t.UniStreamHijacker,
t.MaxResponseHeaderBytes,
t.DisableCompression,
t.Logger,
@@ -388,7 +384,17 @@ func (t *Transport) dial(ctx context.Context, hostname string) (*quic.Conn, clie
if err != nil {
return nil, nil, err
}
- return conn, t.newClientConn(conn), nil
+ clientConn := t.newClientConn(conn)
+ go func() {
+ for {
+ str, err := conn.AcceptUniStream(context.Background())
+ if err != nil {
+ return
+ }
+ go clientConn.handleUnidirectionalStream(str)
+ }
+ }()
+ return conn, clientConn, nil
}
func (t *Transport) resolveUDPAddr(ctx context.Context, network, addr string) (*net.UDPAddr, error) {
@@ -426,16 +432,41 @@ func (t *Transport) removeClient(hostname string) {
// Obtaining a ClientConn is only needed for more advanced use cases, such as
// using Extended CONNECT for WebTransport or the various MASQUE protocols.
func (t *Transport) NewClientConn(conn *quic.Conn) *ClientConn {
- return newClientConn(
+ c := newClientConn(
conn,
t.EnableDatagrams,
t.AdditionalSettings,
- t.StreamHijacker,
- t.UniStreamHijacker,
t.MaxResponseHeaderBytes,
t.DisableCompression,
t.Logger,
)
+ go func() {
+ for {
+ str, err := conn.AcceptUniStream(context.Background())
+ if err != nil {
+ return
+ }
+ go c.handleUnidirectionalStream(str)
+ }
+ }()
+ return c
+}
+
+// NewRawClientConn creates a new low-level HTTP/3 client connection on top of a QUIC connection.
+// Unlike NewClientConn, the returned RawClientConn allows the application to take control
+// of the stream accept loops, by calling HandleUnidirectionalStream for incoming unidirectional
+// streams and HandleBidirectionalStream for incoming bidirectional streams.
+func (t *Transport) NewRawClientConn(conn *quic.Conn) *RawClientConn {
+ return &RawClientConn{
+ ClientConn: newClientConn(
+ conn,
+ t.EnableDatagrams,
+ t.AdditionalSettings,
+ t.MaxResponseHeaderBytes,
+ t.DisableCompression,
+ t.Logger,
+ ),
+ }
}
// Close closes the QUIC connections that this Transport has used.
diff --git a/vendor/github.com/quic-go/quic-go/interface.go b/vendor/github.com/quic-go/quic-go/interface.go
index 984e3b12f..119e32056 100644
--- a/vendor/github.com/quic-go/quic-go/interface.go
+++ b/vendor/github.com/quic-go/quic-go/interface.go
@@ -58,20 +58,6 @@ type TokenStore interface {
// when the server rejects a 0-RTT connection attempt.
var Err0RTTRejected = errors.New("0-RTT rejected")
-// ConnectionTracingKey can be used to associate a [logging.ConnectionTracer] with a [Conn].
-// It is set on the Conn.Context() context,
-// as well as on the context passed to logging.Tracer.NewConnectionTracer.
-//
-// Deprecated: Applications can set their own tracing key using Transport.ConnContext.
-var ConnectionTracingKey = connTracingCtxKey{}
-
-// ConnectionTracingID is the type of the context value saved under the ConnectionTracingKey.
-//
-// Deprecated: Applications can set their own tracing key using Transport.ConnContext.
-type ConnectionTracingID uint64
-
-type connTracingCtxKey struct{}
-
// QUICVersionContextKey can be used to find out the QUIC version of a TLS handshake from the
// context returned by tls.Config.ClientInfo.Context.
var QUICVersionContextKey = handshake.QUICVersionContextKey
@@ -193,11 +179,6 @@ type Config struct {
Tracer func(ctx context.Context, isClient bool, connID ConnectionID) qlogwriter.Trace
}
-// ClientHelloInfo contains information about an incoming connection attempt.
-//
-// Deprecated: Use ClientInfo instead.
-type ClientHelloInfo = ClientInfo
-
// ClientInfo contains information about an incoming connection attempt.
type ClientInfo struct {
// RemoteAddr is the remote address on the Initial packet.
@@ -213,13 +194,18 @@ type ClientInfo struct {
type ConnectionState struct {
// TLS contains information about the TLS connection state, incl. the tls.ConnectionState.
TLS tls.ConnectionState
- // SupportsDatagrams indicates whether the peer advertised support for QUIC datagrams (RFC 9221).
- // When true, datagrams can be sent using the Conn's SendDatagram method.
- // This is a unilateral declaration by the peer - receiving datagrams is only possible if
- // datagram support was enabled locally via Config.EnableDatagrams.
- SupportsDatagrams bool
- // SupportsStreamResetPartialDelivery indicates whether the peer advertised support for QUIC Stream Resets with Partial Delivery.
- SupportsStreamResetPartialDelivery bool
+ // SupportsDatagrams indicates support for QUIC datagrams (RFC 9221).
+ SupportsDatagrams struct {
+ // Remote is true if the peer advertised datagram support.
+ // Local is true if datagram support was enabled via Config.EnableDatagrams.
+ Remote, Local bool
+ }
+ // SupportsStreamResetPartialDelivery indicates support for QUIC Stream Resets with Partial Delivery.
+ SupportsStreamResetPartialDelivery struct {
+ // Remote is true if the peer advertised support.
+ // Local is true if support was enabled via Config.EnableStreamResetPartialDelivery.
+ Remote, Local bool
+ }
// Used0RTT says if 0-RTT resumption was used.
Used0RTT bool
// Version is the QUIC version of the QUIC connection.
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go
deleted file mode 100644
index c399358f2..000000000
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ackhandler
-
-import (
- "github.com/quic-go/quic-go/internal/protocol"
- "github.com/quic-go/quic-go/internal/utils"
- "github.com/quic-go/quic-go/qlogwriter"
-)
-
-// NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler.
-// clientAddressValidated indicates whether the address was validated beforehand by an address validation token.
-// clientAddressValidated has no effect for a client.
-func NewAckHandler(
- initialPacketNumber protocol.PacketNumber,
- initialMaxDatagramSize protocol.ByteCount,
- rttStats *utils.RTTStats,
- connStats *utils.ConnectionStats,
- clientAddressValidated bool,
- enableECN bool,
- pers protocol.Perspective,
- qlogger qlogwriter.Recorder,
- logger utils.Logger,
-) (SentPacketHandler, ReceivedPacketHandler) {
- sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, connStats, clientAddressValidated, enableECN, pers, qlogger, logger)
- return sph, newReceivedPacketHandler(sph, logger)
-}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
index 9ee9da7d9..620a5e11f 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
@@ -13,6 +13,7 @@ type SentPacketHandler interface {
// ReceivedAck processes an ACK frame.
// It does not store a copy of the frame.
ReceivedAck(f *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime monotime.Time) (bool /* 1-RTT packet acked */, error)
+ ReceivedPacket(protocol.EncryptionLevel, monotime.Time)
ReceivedBytes(_ protocol.ByteCount, rcvTime monotime.Time)
DropPackets(_ protocol.EncryptionLevel, rcvTime monotime.Time)
ResetForRetry(rcvTime monotime.Time)
@@ -36,18 +37,3 @@ type SentPacketHandler interface {
MigratedPath(now monotime.Time, initialMaxPacketSize protocol.ByteCount)
}
-
-type sentPacketTracker interface {
- GetLowestPacketNotConfirmedAcked() protocol.PacketNumber
- ReceivedPacket(_ protocol.EncryptionLevel, rcvTime monotime.Time)
-}
-
-// ReceivedPacketHandler handles ACKs needed to send for incoming packets
-type ReceivedPacketHandler interface {
- IsPotentiallyDuplicate(protocol.PacketNumber, protocol.EncryptionLevel) bool
- ReceivedPacket(pn protocol.PacketNumber, ecn protocol.ECN, encLevel protocol.EncryptionLevel, rcvTime monotime.Time, ackEliciting bool) error
- DropPackets(protocol.EncryptionLevel)
-
- GetAlarmTimeout() monotime.Time
- GetAckFrame(_ protocol.EncryptionLevel, now monotime.Time, onlyIfQueued bool) *wire.AckFrame
-}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go
index 323d9fde9..3add80d1f 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go
@@ -2,8 +2,5 @@
package ackhandler
-//go:generate sh -c "go tool mockgen -typed -build_flags=\"-tags=gomock\" -package ackhandler -destination mock_sent_packet_tracker_test.go github.com/quic-go/quic-go/internal/ackhandler SentPacketTracker"
-type SentPacketTracker = sentPacketTracker
-
//go:generate sh -c "go tool mockgen -typed -build_flags=\"-tags=gomock\" -package ackhandler -destination mock_ecn_handler_test.go github.com/quic-go/quic-go/internal/ackhandler ECNHandler"
type ECNHandler = ecnHandler
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
index f0500e9f2..7d9863000 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
@@ -24,12 +24,15 @@ type packet struct {
IsPathMTUProbePacket bool // We don't report the loss of Path MTU probe packets to the congestion controller.
includedInBytesInFlight bool
- declaredLost bool
isPathProbePacket bool
}
-func (p *packet) outstanding() bool {
- return !p.declaredLost && !p.IsPathMTUProbePacket && !p.isPathProbePacket
+func (p *packet) Outstanding() bool {
+ return !p.IsPathMTUProbePacket && !p.isPathProbePacket && p.IsAckEliciting()
+}
+
+func (p *packet) IsAckEliciting() bool {
+ return len(p.StreamFrames) > 0 || len(p.Frames) > 0
}
var packetPool = sync.Pool{New: func() any { return &packet{} }}
@@ -44,7 +47,6 @@ func getPacket() *packet {
p.SendTime = 0
p.IsPathMTUProbePacket = false
p.includedInBytesInFlight = false
- p.declaredLost = false
p.isPathProbePacket = false
return p
}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go
index 7180db85f..d0d24bf30 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go
@@ -9,9 +9,7 @@ import (
"github.com/quic-go/quic-go/internal/wire"
)
-type receivedPacketHandler struct {
- sentPackets sentPacketTracker
-
+type ReceivedPacketHandler struct {
initialPackets *receivedPacketTracker
handshakePackets *receivedPacketTracker
appDataPackets appDataReceivedPacketTracker
@@ -19,11 +17,8 @@ type receivedPacketHandler struct {
lowest1RTTPacket protocol.PacketNumber
}
-var _ ReceivedPacketHandler = &receivedPacketHandler{}
-
-func newReceivedPacketHandler(sentPackets sentPacketTracker, logger utils.Logger) ReceivedPacketHandler {
- return &receivedPacketHandler{
- sentPackets: sentPackets,
+func NewReceivedPacketHandler(logger utils.Logger) *ReceivedPacketHandler {
+ return &ReceivedPacketHandler{
initialPackets: newReceivedPacketTracker(),
handshakePackets: newReceivedPacketTracker(),
appDataPackets: *newAppDataReceivedPacketTracker(logger),
@@ -31,14 +26,13 @@ func newReceivedPacketHandler(sentPackets sentPacketTracker, logger utils.Logger
}
}
-func (h *receivedPacketHandler) ReceivedPacket(
+func (h *ReceivedPacketHandler) ReceivedPacket(
pn protocol.PacketNumber,
ecn protocol.ECN,
encLevel protocol.EncryptionLevel,
rcvTime monotime.Time,
ackEliciting bool,
) error {
- h.sentPackets.ReceivedPacket(encLevel, rcvTime)
switch encLevel {
case protocol.EncryptionInitial:
return h.initialPackets.ReceivedPacket(pn, ecn, ackEliciting)
@@ -58,17 +52,17 @@ func (h *receivedPacketHandler) ReceivedPacket(
if h.lowest1RTTPacket == protocol.InvalidPacketNumber || pn < h.lowest1RTTPacket {
h.lowest1RTTPacket = pn
}
- if err := h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting); err != nil {
- return err
- }
- h.appDataPackets.IgnoreBelow(h.sentPackets.GetLowestPacketNotConfirmedAcked())
- return nil
+ return h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting)
default:
panic(fmt.Sprintf("received packet with unknown encryption level: %s", encLevel))
}
}
-func (h *receivedPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) {
+func (h *ReceivedPacketHandler) IgnorePacketsBelow(pn protocol.PacketNumber) {
+ h.appDataPackets.IgnoreBelow(pn)
+}
+
+func (h *ReceivedPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) {
//nolint:exhaustive // 1-RTT packet number space is never dropped.
switch encLevel {
case protocol.EncryptionInitial:
@@ -83,11 +77,11 @@ func (h *receivedPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) {
}
}
-func (h *receivedPacketHandler) GetAlarmTimeout() monotime.Time {
+func (h *ReceivedPacketHandler) GetAlarmTimeout() monotime.Time {
return h.appDataPackets.GetAlarmTimeout()
}
-func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, now monotime.Time, onlyIfQueued bool) *wire.AckFrame {
+func (h *ReceivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, now monotime.Time, onlyIfQueued bool) *wire.AckFrame {
//nolint:exhaustive // 0-RTT packets can't contain ACK frames.
switch encLevel {
case protocol.EncryptionInitial:
@@ -108,7 +102,7 @@ func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, n
}
}
-func (h *receivedPacketHandler) IsPotentiallyDuplicate(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel) bool {
+func (h *ReceivedPacketHandler) IsPotentiallyDuplicate(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel) bool {
switch encLevel {
case protocol.EncryptionInitial:
if h.initialPackets != nil {
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
index e134232c4..9b539eadd 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
@@ -69,6 +69,8 @@ type sentPacketHandler struct {
handshakePackets *packetNumberSpace
appDataPackets *packetNumberSpace
lostPackets lostPacketTracker // only for application-data packet number space
+ // send time of the largest acknowledged packet, across all packet number spaces
+ largestAckedTime monotime.Time
// Do we know that the peer completed address validation yet?
// Always true for the server.
@@ -81,11 +83,7 @@ type sentPacketHandler struct {
handshakeConfirmed bool
- // lowestNotConfirmedAcked is the lowest packet number that we sent an ACK for, but haven't received confirmation, that this ACK actually arrived
- // example: we send an ACK for packets 90-100 with packet number 20
- // once we receive an ACK from the peer for packet 20, the lowestNotConfirmedAcked is 101
- // Only applies to the application-data packet number space.
- lowestNotConfirmedAcked protocol.PacketNumber
+ ignorePacketsBelow func(protocol.PacketNumber)
ackedPackets []packetWithPacketNumber // to avoid allocations in detectAndRemoveAckedPackets
@@ -115,24 +113,22 @@ type sentPacketHandler struct {
logger utils.Logger
}
-var (
- _ SentPacketHandler = &sentPacketHandler{}
- _ sentPacketTracker = &sentPacketHandler{}
-)
+var _ SentPacketHandler = &sentPacketHandler{}
// clientAddressValidated indicates whether the address was validated beforehand by an address validation token.
// If the address was validated, the amplification limit doesn't apply. It has no effect for a client.
-func newSentPacketHandler(
+func NewSentPacketHandler(
initialPN protocol.PacketNumber,
initialMaxDatagramSize protocol.ByteCount,
rttStats *utils.RTTStats,
connStats *utils.ConnectionStats,
clientAddressValidated bool,
enableECN bool,
+ ignorePacketsBelow func(protocol.PacketNumber),
pers protocol.Perspective,
qlogger qlogwriter.Recorder,
logger utils.Logger,
-) *sentPacketHandler {
+) SentPacketHandler {
congestion := congestion.NewCubicSender(
congestion.DefaultClock{},
rttStats,
@@ -152,6 +148,7 @@ func newSentPacketHandler(
rttStats: rttStats,
connStats: connStats,
congestion: congestion,
+ ignorePacketsBelow: ignorePacketsBelow,
perspective: pers,
qlogger: qlogger,
logger: logger,
@@ -242,12 +239,12 @@ func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel, t monotim
}
func (h *sentPacketHandler) packetsInFlight() int {
- packetsInFlight := h.appDataPackets.history.Len()
+ packetsInFlight := h.appDataPackets.history.NumOutstanding()
if h.handshakePackets != nil {
- packetsInFlight += h.handshakePackets.history.Len()
+ packetsInFlight += h.handshakePackets.history.NumOutstanding()
}
if h.initialPackets != nil {
- packetsInFlight += h.initialPackets.history.Len()
+ packetsInFlight += h.initialPackets.history.NumOutstanding()
}
return packetsInFlight
}
@@ -275,15 +272,19 @@ func (h *sentPacketHandler) SentPacket(
}
pnSpace.largestSent = pn
- isAckEliciting := len(streamFrames) > 0 || len(frames) > 0
+
+ p := getPacket()
+ p.SendTime = t
+ p.EncryptionLevel = encLevel
+ p.Length = size
+ p.Frames = frames
+ p.LargestAcked = largestAcked
+ p.StreamFrames = streamFrames
+ p.IsPathMTUProbePacket = isPathMTUProbePacket
+ p.isPathProbePacket = isPathProbePacket
+ isAckEliciting := p.IsAckEliciting()
if isPathProbePacket {
- p := getPacket()
- p.SendTime = t
- p.EncryptionLevel = encLevel
- p.Length = size
- p.Frames = frames
- p.isPathProbePacket = true
pnSpace.history.SentPathProbePacket(pn, p)
h.setLossDetectionTimer(t)
return
@@ -291,6 +292,7 @@ func (h *sentPacketHandler) SentPacket(
if isAckEliciting {
pnSpace.lastAckElicitingPacketTime = t
h.bytesInFlight += size
+ p.includedInBytesInFlight = true
if h.numProbesToSend > 0 {
h.numProbesToSend--
}
@@ -301,25 +303,13 @@ func (h *sentPacketHandler) SentPacket(
h.ecnTracker.SentPacket(pn, ecn)
}
+ pnSpace.history.SentPacket(pn, p)
if !isAckEliciting {
- pnSpace.history.SentNonAckElicitingPacket(pn)
if !h.peerCompletedAddressValidation {
h.setLossDetectionTimer(t)
}
return
}
-
- p := getPacket()
- p.SendTime = t
- p.EncryptionLevel = encLevel
- p.Length = size
- p.LargestAcked = largestAcked
- p.StreamFrames = streamFrames
- p.Frames = frames
- p.IsPathMTUProbePacket = isPathMTUProbePacket
- p.includedInBytesInFlight = true
-
- pnSpace.history.SentAckElicitingPacket(pn, p)
if h.qlogger != nil {
h.qlogMetricsUpdated()
}
@@ -361,8 +351,9 @@ func (h *sentPacketHandler) qlogMetricsUpdated() {
h.lastMetrics.BytesInFlight = metricsUpdatedEvent.BytesInFlight
updated = true
}
- if h.lastMetrics.PacketsInFlight != h.packetsInFlight() {
- metricsUpdatedEvent.PacketsInFlight = h.packetsInFlight()
+ packetsInFlight := h.packetsInFlight()
+ if h.lastMetrics.PacketsInFlight != packetsInFlight {
+ metricsUpdatedEvent.PacketsInFlight = packetsInFlight
h.lastMetrics.PacketsInFlight = metricsUpdatedEvent.PacketsInFlight
updated = true
}
@@ -405,21 +396,26 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
}
priorInFlight := h.bytesInFlight
- ackedPackets, err := h.detectAndRemoveAckedPackets(ack, encLevel)
+ ackedPackets, hasAckEliciting, err := h.detectAndRemoveAckedPackets(ack, encLevel)
if err != nil || len(ackedPackets) == 0 {
return false, err
}
- // update the RTT, if the largest acked is newly acknowledged
+ // update the RTT, if:
+ // * the largest acked is newly acknowledged, AND
+ // * at least one new ack-eliciting packet was acknowledged
if len(ackedPackets) > 0 {
- if p := ackedPackets[len(ackedPackets)-1]; p.PacketNumber == ack.LargestAcked() && !p.isPathProbePacket {
+ if p := ackedPackets[len(ackedPackets)-1]; p.PacketNumber == ack.LargestAcked() && !p.isPathProbePacket && hasAckEliciting {
// don't use the ack delay for Initial and Handshake packets
var ackDelay time.Duration
if encLevel == protocol.Encryption1RTT {
ackDelay = min(ack.DelayTime, h.rttStats.MaxAckDelay())
}
- h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay)
- if h.logger.Debug() {
- h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation())
+ if h.largestAckedTime.IsZero() || !p.SendTime.Before(h.largestAckedTime) {
+ h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay)
+ if h.logger.Debug() {
+ h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation())
+ }
+ h.largestAckedTime = p.SendTime
}
h.congestion.MaybeExitSlowStart()
}
@@ -441,7 +437,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
}
var acked1RTTPacket bool
for _, p := range ackedPackets {
- if p.includedInBytesInFlight && !p.declaredLost {
+ if p.includedInBytesInFlight {
h.congestion.OnPacketAcked(p.PacketNumber, p.Length, priorInFlight, rcvTime)
}
if p.EncryptionLevel == protocol.Encryption1RTT {
@@ -526,14 +522,13 @@ func (h *sentPacketHandler) detectSpuriousLosses(ack *wire.AckFrame, ackTime mon
}
}
-func (h *sentPacketHandler) GetLowestPacketNotConfirmedAcked() protocol.PacketNumber {
- return h.lowestNotConfirmedAcked
-}
-
// Packets are returned in ascending packet number order.
-func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encLevel protocol.EncryptionLevel) ([]packetWithPacketNumber, error) {
+func (h *sentPacketHandler) detectAndRemoveAckedPackets(
+ ack *wire.AckFrame,
+ encLevel protocol.EncryptionLevel,
+) (_ []packetWithPacketNumber, hasAckEliciting bool, _ error) {
if len(h.ackedPackets) > 0 {
- return nil, errors.New("ackhandler BUG: ackedPackets slice not empty")
+ return nil, false, errors.New("ackhandler BUG: ackedPackets slice not empty")
}
pnSpace := h.getPacketNumberSpace(encLevel)
@@ -541,7 +536,7 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
if encLevel == protocol.Encryption1RTT {
for p := range pnSpace.history.SkippedPackets() {
if ack.AcksPacket(p) {
- return nil, &qerr.TransportError{
+ return nil, false, &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("received an ACK for skipped packet number: %d (%s)", p, encLevel),
}
@@ -573,7 +568,7 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
continue
}
if pn > ackRange.Largest {
- return nil, fmt.Errorf("BUG: ackhandler would have acked wrong packet %d, while evaluating range %d -> %d", pn, ackRange.Smallest, ackRange.Largest)
+ return nil, false, fmt.Errorf("BUG: ackhandler would have acked wrong packet %d, while evaluating range %d -> %d", pn, ackRange.Smallest, ackRange.Largest)
}
}
if p.isPathProbePacket {
@@ -584,6 +579,9 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
}
continue
}
+ if p.IsAckEliciting() {
+ hasAckEliciting = true
+ }
h.ackedPackets = append(h.ackedPackets, packetWithPacketNumber{PacketNumber: pn, packet: p})
}
if h.logger.Debug() && len(h.ackedPackets) > 0 {
@@ -595,8 +593,8 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
}
for _, p := range h.ackedPackets {
- if p.LargestAcked != protocol.InvalidPacketNumber && encLevel == protocol.Encryption1RTT {
- h.lowestNotConfirmedAcked = max(h.lowestNotConfirmedAcked, p.LargestAcked+1)
+ if p.LargestAcked != protocol.InvalidPacketNumber && encLevel == protocol.Encryption1RTT && h.ignorePacketsBelow != nil {
+ h.ignorePacketsBelow(p.LargestAcked + 1)
}
for _, f := range p.Frames {
@@ -610,11 +608,11 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
}
}
if err := pnSpace.history.Remove(p.PacketNumber); err != nil {
- return nil, err
+ return nil, false, err
}
}
// TODO: add support for the transport:packets_acked qlog event
- return h.ackedPackets, nil
+ return h.ackedPackets, hasAckEliciting, nil
}
func (h *sentPacketHandler) getLossTimeAndSpace() (monotime.Time, protocol.EncryptionLevel) {
@@ -808,7 +806,7 @@ func (h *sentPacketHandler) detectLostPackets(now monotime.Time, encLevel protoc
var packetLost bool
if !p.SendTime.After(lostSendTime) {
packetLost = true
- if !p.isPathProbePacket {
+ if !p.isPathProbePacket && p.IsAckEliciting() {
if h.logger.Debug() {
h.logger.Debugf("\tlost packet %d (time threshold)", pn)
}
@@ -824,7 +822,7 @@ func (h *sentPacketHandler) detectLostPackets(now monotime.Time, encLevel protoc
}
} else if pnSpace.history.Difference(pnSpace.largestAcked, pn) >= packetThreshold {
packetLost = true
- if !p.isPathProbePacket {
+ if !p.isPathProbePacket && p.IsAckEliciting() {
if h.logger.Debug() {
h.logger.Debugf("\tlost packet %d (reordering threshold)", pn)
}
@@ -851,7 +849,7 @@ func (h *sentPacketHandler) detectLostPackets(now monotime.Time, encLevel protoc
h.lostPackets.Add(pn, p.SendTime)
}
pnSpace.history.DeclareLost(pn)
- if !p.isPathProbePacket {
+ if !p.isPathProbePacket && p.IsAckEliciting() {
// the bytes in flight need to be reduced no matter if the frames in this packet will be retransmitted
h.removeFromBytesInFlight(p)
h.queueFramesForRetransmission(p)
@@ -1046,11 +1044,12 @@ func (h *sentPacketHandler) QueueProbePacket(encLevel protocol.EncryptionLevel)
if p == nil {
return false
}
- h.queueFramesForRetransmission(p)
// TODO: don't declare the packet lost here.
// Keep track of acknowledged frames instead.
- h.removeFromBytesInFlight(p)
+ // Call DeclareLost before queueFramesForRetransmission, which clears the packet's frames.
pnSpace.history.DeclareLost(pn)
+ h.removeFromBytesInFlight(p)
+ h.queueFramesForRetransmission(p)
return true
}
@@ -1079,14 +1078,14 @@ func (h *sentPacketHandler) ResetForRetry(now monotime.Time) {
if firstPacketSendTime.IsZero() {
firstPacketSendTime = p.SendTime
}
- if !p.declaredLost {
+ if p.IsAckEliciting() {
h.queueFramesForRetransmission(p)
}
}
// All application data packets sent at this point are 0-RTT packets.
// In the case of a Retry, we can assume that the server dropped all of them.
for _, p := range h.appDataPackets.history.Packets() {
- if !p.declaredLost {
+ if p.IsAckEliciting() {
h.queueFramesForRetransmission(p)
}
}
@@ -1124,7 +1123,9 @@ func (h *sentPacketHandler) MigratedPath(now monotime.Time, initialMaxDatagramSi
h.appDataPackets.history.DeclareLost(pn)
if !p.isPathProbePacket {
h.removeFromBytesInFlight(p)
- h.queueFramesForRetransmission(p)
+ if p.IsAckEliciting() {
+ h.queueFramesForRetransmission(p)
+ }
}
}
for pn := range h.appDataPackets.history.PathProbes() {
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
index 54b74cf74..1500e1cdf 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
@@ -58,17 +58,10 @@ func (h *sentPacketHistory) SkippedPacket(pn protocol.PacketNumber) {
h.skippedPackets = append(h.skippedPackets, pn)
}
-func (h *sentPacketHistory) SentNonAckElicitingPacket(pn protocol.PacketNumber) {
- h.checkSequentialPacketNumberUse(pn)
- if len(h.packets) > 0 {
- h.packets = append(h.packets, nil)
- }
-}
-
-func (h *sentPacketHistory) SentAckElicitingPacket(pn protocol.PacketNumber, p *packet) {
+func (h *sentPacketHistory) SentPacket(pn protocol.PacketNumber, p *packet) {
h.checkSequentialPacketNumberUse(pn)
h.packets = append(h.packets, p)
- if p.outstanding() {
+ if p.Outstanding() {
h.numOutstanding++
}
}
@@ -111,7 +104,7 @@ func (h *sentPacketHistory) FirstOutstanding() (protocol.PacketNumber, *packet)
return protocol.InvalidPacketNumber, nil
}
for i, p := range h.packets {
- if p != nil && p.outstanding() {
+ if p != nil && p.Outstanding() {
return h.firstPacketNumber + protocol.PacketNumber(i), p
}
}
@@ -140,6 +133,10 @@ func (h *sentPacketHistory) Len() int {
return len(h.packets)
}
+func (h *sentPacketHistory) NumOutstanding() int {
+ return h.numOutstanding
+}
+
// Remove removes a packet from the sent packet history.
// It must not be used for skipped packet numbers.
func (h *sentPacketHistory) Remove(pn protocol.PacketNumber) error {
@@ -148,7 +145,7 @@ func (h *sentPacketHistory) Remove(pn protocol.PacketNumber) error {
return fmt.Errorf("packet %d not found in sent packet history", pn)
}
p := h.packets[idx]
- if p.outstanding() {
+ if p.Outstanding() {
h.numOutstanding--
if h.numOutstanding < 0 {
panic("negative number of outstanding packets")
@@ -243,7 +240,7 @@ func (h *sentPacketHistory) DeclareLost(pn protocol.PacketNumber) {
return
}
p := h.packets[idx]
- if p.outstanding() {
+ if p.Outstanding() {
h.numOutstanding--
if h.numOutstanding < 0 {
panic("negative number of outstanding packets")
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
index 1baf5d6b0..ce83ab18c 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
@@ -6,7 +6,7 @@ import (
"github.com/quic-go/quic-go/internal/protocol"
)
-func createAEAD(suite *cipherSuite, trafficSecret []byte, v protocol.Version) *xorNonceAEAD {
+func createAEAD(suite cipherSuite, trafficSecret []byte, v protocol.Version) *xorNonceAEAD {
keyLabel := hkdfLabelKeyV1
ivLabel := hkdfLabelIVV1
if v == protocol.Version2 {
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go b/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go
index d8a381daf..03fe0dad8 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go
@@ -23,14 +23,14 @@ type cipherSuite struct {
func (s cipherSuite) IVLen() int { return aeadNonceLength }
-func getCipherSuite(id uint16) *cipherSuite {
+func getCipherSuite(id uint16) cipherSuite {
switch id {
case tls.TLS_AES_128_GCM_SHA256:
- return &cipherSuite{ID: tls.TLS_AES_128_GCM_SHA256, Hash: crypto.SHA256, KeyLen: 16, AEAD: aeadAESGCMTLS13}
+ return cipherSuite{ID: tls.TLS_AES_128_GCM_SHA256, Hash: crypto.SHA256, KeyLen: 16, AEAD: aeadAESGCMTLS13}
case tls.TLS_CHACHA20_POLY1305_SHA256:
- return &cipherSuite{ID: tls.TLS_CHACHA20_POLY1305_SHA256, Hash: crypto.SHA256, KeyLen: 32, AEAD: aeadChaCha20Poly1305}
+ return cipherSuite{ID: tls.TLS_CHACHA20_POLY1305_SHA256, Hash: crypto.SHA256, KeyLen: 32, AEAD: aeadChaCha20Poly1305}
case tls.TLS_AES_256_GCM_SHA384:
- return &cipherSuite{ID: tls.TLS_AES_256_GCM_SHA384, Hash: crypto.SHA384, KeyLen: 32, AEAD: aeadAESGCMTLS13}
+ return cipherSuite{ID: tls.TLS_AES_256_GCM_SHA384, Hash: crypto.SHA384, KeyLen: 32, AEAD: aeadAESGCMTLS13}
default:
panic(fmt.Sprintf("unknown cypher suite: %d", id))
}
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
index 534934515..d481ac675 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
@@ -387,19 +387,29 @@ func (h *cryptoSetup) GetSessionTicket() ([]byte, error) {
// We can't check h.tlsConfig here, since the actual config might have been obtained from
// the GetConfigForClient callback.
// See https://github.com/golang/go/issues/62032.
- // Once that issue is resolved, this error assertion can be removed.
+ // This error assertion can be removed once we drop support for Go 1.25.
if strings.Contains(err.Error(), "session ticket keys unavailable") {
return nil, nil
}
return nil, err
}
- ev := h.conn.NextEvent()
- if ev.Kind != tls.QUICWriteData || ev.Level != tls.QUICEncryptionLevelApplication {
- panic("crypto/tls bug: where's my session ticket?")
- }
- ticket := ev.Data
- if ev := h.conn.NextEvent(); ev.Kind != tls.QUICNoEvent {
- panic("crypto/tls bug: why more than one ticket?")
+ // If session tickets are disabled, NextEvent will immediately return QUICNoEvent,
+ // and we will return a nil ticket.
+ var ticket []byte
+ for {
+ ev := h.conn.NextEvent()
+ if ev.Kind == tls.QUICNoEvent {
+ break
+ }
+ if ev.Kind == tls.QUICWriteData && ev.Level == tls.QUICEncryptionLevelApplication {
+ if ticket != nil {
+ h.logger.Errorf("unexpected multiple session tickets")
+ continue
+ }
+ ticket = ev.Data
+ } else {
+ h.logger.Errorf("unexpected event: %v", ev.Kind)
+ }
}
return ticket, nil
}
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go
index 2c5ee42f1..93c3cd980 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go
@@ -24,7 +24,7 @@ func hkdfHeaderProtectionLabel(v protocol.Version) string {
return "quic hp"
}
-func newHeaderProtector(suite *cipherSuite, trafficSecret []byte, isLongHeader bool, v protocol.Version) headerProtector {
+func newHeaderProtector(suite cipherSuite, trafficSecret []byte, isLongHeader bool, v protocol.Version) headerProtector {
hkdfLabel := hkdfHeaderProtectionLabel(v)
switch suite.ID {
case tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384:
@@ -44,7 +44,7 @@ type aesHeaderProtector struct {
var _ headerProtector = &aesHeaderProtector{}
-func newAESHeaderProtector(suite *cipherSuite, trafficSecret []byte, isLongHeader bool, hkdfLabel string) headerProtector {
+func newAESHeaderProtector(suite cipherSuite, trafficSecret []byte, isLongHeader bool, hkdfLabel string) headerProtector {
hpKey := hkdfExpandLabel(suite.Hash, trafficSecret, []byte{}, hkdfLabel, suite.KeyLen)
block, err := aes.NewCipher(hpKey)
if err != nil {
@@ -88,7 +88,7 @@ type chachaHeaderProtector struct {
var _ headerProtector = &chachaHeaderProtector{}
-func newChaChaHeaderProtector(suite *cipherSuite, trafficSecret []byte, isLongHeader bool, hkdfLabel string) headerProtector {
+func newChaChaHeaderProtector(suite cipherSuite, trafficSecret []byte, isLongHeader bool, hkdfLabel string) headerProtector {
hpKey := hkdfExpandLabel(suite.Hash, trafficSecret, []byte{}, hkdfLabel, suite.KeyLen)
p := &chachaHeaderProtector{
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go
index ae63eec89..f88f76ad8 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go
@@ -32,7 +32,7 @@ func SetKeyUpdateInterval(v uint64) (reset func()) {
var FirstKeyUpdateInterval uint64 = 100
type updatableAEAD struct {
- suite *cipherSuite
+ suite cipherSuite
keyPhase protocol.KeyPhase
largestAcked protocol.PacketNumber
@@ -136,10 +136,10 @@ func (a *updatableAEAD) getNextTrafficSecret(hash crypto.Hash, ts []byte) []byte
// SetReadKey sets the read key.
// For the client, this function is called before SetWriteKey.
// For the server, this function is called after SetWriteKey.
-func (a *updatableAEAD) SetReadKey(suite *cipherSuite, trafficSecret []byte) {
+func (a *updatableAEAD) SetReadKey(suite cipherSuite, trafficSecret []byte) {
a.rcvAEAD = createAEAD(suite, trafficSecret, a.version)
a.headerDecrypter = newHeaderProtector(suite, trafficSecret, false, a.version)
- if a.suite == nil {
+ if a.suite.ID == 0 { // suite is not set yet
a.setAEADParameters(a.rcvAEAD, suite)
}
@@ -150,10 +150,10 @@ func (a *updatableAEAD) SetReadKey(suite *cipherSuite, trafficSecret []byte) {
// SetWriteKey sets the write key.
// For the client, this function is called after SetReadKey.
// For the server, this function is called before SetReadKey.
-func (a *updatableAEAD) SetWriteKey(suite *cipherSuite, trafficSecret []byte) {
+func (a *updatableAEAD) SetWriteKey(suite cipherSuite, trafficSecret []byte) {
a.sendAEAD = createAEAD(suite, trafficSecret, a.version)
a.headerEncrypter = newHeaderProtector(suite, trafficSecret, false, a.version)
- if a.suite == nil {
+ if a.suite.ID == 0 { // suite is not set yet
a.setAEADParameters(a.sendAEAD, suite)
}
@@ -161,7 +161,7 @@ func (a *updatableAEAD) SetWriteKey(suite *cipherSuite, trafficSecret []byte) {
a.nextSendAEAD = createAEAD(suite, a.nextSendTrafficSecret, a.version)
}
-func (a *updatableAEAD) setAEADParameters(aead cipher.AEAD, suite *cipherSuite) {
+func (a *updatableAEAD) setAEADParameters(aead cipher.AEAD, suite cipherSuite) {
a.nonceBuf = make([]byte, aead.NonceSize())
a.aeadOverhead = aead.Overhead()
a.suite = suite
diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/params.go b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go
index fe86317fb..0861d57fc 100644
--- a/vendor/github.com/quic-go/quic-go/internal/protocol/params.go
+++ b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go
@@ -112,16 +112,13 @@ const MinStreamFrameSize ByteCount = 128
// we send after the handshake completes.
const MaxPostHandshakeCryptoFrameSize = 1000
-// MaxAckFrameSize is the maximum size for an ACK frame that we write
-// Due to the varint encoding, ACK frames can grow (almost) indefinitely large.
-// The MaxAckFrameSize should be large enough to encode many ACK range,
-// but must ensure that a maximum size ACK frame fits into one packet.
-const MaxAckFrameSize ByteCount = 1000
-
// MaxNumAckRanges is the maximum number of ACK ranges that we send in an ACK frame.
// It also serves as a limit for the packet history.
// If at any point we keep track of more ranges, old ranges are discarded.
-const MaxNumAckRanges = 32
+//
+// This value also guarantees that ACK Range Count value in the ACK frame can be encoded
+// in a single byte varint.
+const MaxNumAckRanges = 64
// MinPacingDelay is the minimum duration that is used for packet pacing
// If the packet packing frequency is higher, multiple packets might be sent at once.
diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
index 68bebfa79..191e15307 100644
--- a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
@@ -64,7 +64,7 @@ func parseAckFrame(frame *AckFrame, b []byte, typ FrameType, ackDelayExponent ui
frame.AckRanges = append(frame.AckRanges, AckRange{Smallest: smallest, Largest: largestAcked})
// read all the other ACK ranges
- for i := uint64(0); i < numBlocks; i++ {
+ for range numBlocks {
g, l, err := quicvarint.Parse(b)
if err != nil {
return 0, replaceUnexpectedEOF(err)
@@ -129,7 +129,7 @@ func (f *AckFrame) Append(b []byte, _ protocol.Version) ([]byte, error) {
b = quicvarint.Append(b, uint64(f.LargestAcked()))
b = quicvarint.Append(b, encodeAckDelay(f.DelayTime))
- numRanges := f.numEncodableAckRanges()
+ numRanges := min(len(f.AckRanges), protocol.MaxNumAckRanges)
b = quicvarint.Append(b, uint64(numRanges-1))
// write the first range
@@ -154,46 +154,69 @@ func (f *AckFrame) Append(b []byte, _ protocol.Version) ([]byte, error) {
// Length of a written frame
func (f *AckFrame) Length(_ protocol.Version) protocol.ByteCount {
largestAcked := f.AckRanges[0].Largest
- numRanges := f.numEncodableAckRanges()
- length := 1 + quicvarint.Len(uint64(largestAcked)) + quicvarint.Len(encodeAckDelay(f.DelayTime))
+ // The number of ACK ranges is limited to 64, which guarantees that the
+ // ACK Range Count value can be encoded in a single byte varint.
+ length := 1 + quicvarint.Len(uint64(largestAcked)) + quicvarint.Len(encodeAckDelay(f.DelayTime)) + 1
- length += quicvarint.Len(uint64(numRanges - 1))
lowestInFirstRange := f.AckRanges[0].Smallest
length += quicvarint.Len(uint64(largestAcked - lowestInFirstRange))
- for i := 1; i < numRanges; i++ {
+ for i := 1; i < min(len(f.AckRanges), protocol.MaxNumAckRanges); i++ {
gap, len := f.encodeAckRange(i)
length += quicvarint.Len(gap)
length += quicvarint.Len(len)
}
if f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0 {
- length += quicvarint.Len(f.ECT0)
- length += quicvarint.Len(f.ECT1)
- length += quicvarint.Len(f.ECNCE)
+ length += quicvarint.Len(f.ECT0) + quicvarint.Len(f.ECT1) + quicvarint.Len(f.ECNCE)
}
return protocol.ByteCount(length)
}
+// Truncate truncates the ACK frame to fit into maxSize,
+// and to at most 64 ACK ranges.
+// maxSize must be large enough to fit at least one ACK range.
+func (f *AckFrame) Truncate(maxSize protocol.ByteCount, _ protocol.Version) {
+ f.AckRanges = f.AckRanges[:f.numEncodableAckRanges(maxSize)]
+}
+
// gets the number of ACK ranges that can be encoded
-// such that the resulting frame is smaller than the maximum ACK frame size
-func (f *AckFrame) numEncodableAckRanges() int {
- length := 1 + quicvarint.Len(uint64(f.LargestAcked())) + quicvarint.Len(encodeAckDelay(f.DelayTime))
- length += 2 // assume that the number of ranges will consume 2 bytes
- for i := 1; i < len(f.AckRanges); i++ {
- gap, len := f.encodeAckRange(i)
- rangeLen := quicvarint.Len(gap) + quicvarint.Len(len)
- if protocol.ByteCount(length+rangeLen) > protocol.MaxAckFrameSize {
- // Writing range i would exceed the MaxAckFrameSize.
- // So encode one range less than that.
- return i - 1
+// such that the resulting frame is smaller than maxSize
+func (f *AckFrame) numEncodableAckRanges(maxSize protocol.ByteCount) int {
+ // Fast path: Most ACK frames are relatively small, and we don't need to calculate the exact length.
+ // We just assume the worst case scenario: every varint is encoded to 8 bytes.
+ // If the result is still smaller than the maximum ACK frame size, the actual ACK frame will definitely fit.
+ length := 1 + 8 /* largest acked */ + 8 /* delay */ + 1 /* ack range count */ + 8 /* first range */
+ if f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0 {
+ length += 8 + 8 + 8
+ }
+ numRanges := min(len(f.AckRanges), protocol.MaxNumAckRanges)
+ length += 2 * 8 * (numRanges - 1)
+ if protocol.ByteCount(length) <= maxSize {
+ return numRanges
+ }
+
+ // Slow path: Calculate the exact length of the ACK frame.
+ length = 1 + quicvarint.Len(uint64(f.LargestAcked())) + quicvarint.Len(encodeAckDelay(f.DelayTime)) + 1
+ _, firstRange := f.encodeAckRange(0)
+ length += quicvarint.Len(firstRange)
+ if f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0 {
+ length += quicvarint.Len(f.ECT0) + quicvarint.Len(f.ECT1) + quicvarint.Len(f.ECNCE)
+ }
+ for i := 1; i < numRanges; i++ {
+ gap, l := f.encodeAckRange(i)
+ rangeLen := quicvarint.Len(gap) + quicvarint.Len(l)
+ if protocol.ByteCount(length+rangeLen) > maxSize {
+ // Writing range i would exceed the maximum size,
+ // so encode one range less than that.
+ return i
}
length += rangeLen
}
- return len(f.AckRanges)
+ return numRanges
}
-func (f *AckFrame) encodeAckRange(i int) (uint64 /* gap */, uint64 /* length */) {
+func (f *AckFrame) encodeAckRange(i int) (gap, length uint64) {
if i == 0 {
return 0, uint64(f.AckRanges[0].Largest - f.AckRanges[0].Smallest)
}
@@ -218,7 +241,7 @@ func (f *AckFrame) validateAckRanges() bool {
}
}
- // check the consistency for ACK with multiple NACK ranges
+ // check the consistency for ACK with multiple ACK ranges
for i, ackRange := range f.AckRanges {
if i == 0 {
continue
diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go
index e3933da6b..175d3c86d 100644
--- a/vendor/github.com/quic-go/quic-go/packet_packer.go
+++ b/vendor/github.com/quic-go/quic-go/packet_packer.go
@@ -350,7 +350,6 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxSize protocol.ByteCo
now,
false,
onlyAck,
- true,
v,
)
if initialPayload.length > 0 {
@@ -373,7 +372,6 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxSize protocol.ByteCo
now,
false,
onlyAck,
- size == 0,
v,
)
if handshakePayload.length > 0 {
@@ -399,7 +397,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxSize protocol.ByteCo
connID = p.getDestConnID()
oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
hdrLen := wire.ShortHeaderLen(connID, oneRTTPacketNumberLen)
- oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxSize-size, onlyAck, size == 0, now, v)
+ oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxSize-size, onlyAck, now, v)
if oneRTTPayload.length > 0 {
size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, oneRTTPayload) + protocol.ByteCount(oneRTTSealer.Overhead())
}
@@ -486,7 +484,7 @@ func (p *packetPacker) appendPacket(
pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
connID := p.getDestConnID()
hdrLen := wire.ShortHeaderLen(connID, pnLen)
- pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, maxPacketSize, onlyAck, true, now, v)
+ pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, maxPacketSize, onlyAck, now, v)
if pl.length == 0 {
return shortHeaderPacket{}, errNothingToPack
}
@@ -500,17 +498,17 @@ func (p *packetPacker) maybeGetCryptoPacket(
encLevel protocol.EncryptionLevel,
now monotime.Time,
addPingIfEmpty bool,
- onlyAck, ackAllowed bool,
+ onlyAck bool,
v protocol.Version,
) (*wire.ExtendedHeader, payload) {
if onlyAck {
if ack := p.acks.GetAckFrame(encLevel, now, true); ack != nil {
- return p.getLongHeader(encLevel, v), payload{
- ack: ack,
- length: ack.Length(v),
- }
+ hdr := p.getLongHeader(encLevel, v)
+ maxPacketSize -= hdr.GetLength(v)
+ ack.Truncate(maxPacketSize, v)
+ return hdr, payload{ack: ack, length: ack.Length(v)}
}
- return nil, payload{}
+ return nil, payload{length: 0}
}
var hasCryptoData func() bool
@@ -527,10 +525,7 @@ func (p *packetPacker) maybeGetCryptoPacket(
handler := p.retransmissionQueue.AckHandler(encLevel)
hasRetransmission := p.retransmissionQueue.HasData(encLevel)
- var ack *wire.AckFrame
- if ackAllowed {
- ack = p.acks.GetAckFrame(encLevel, now, !hasRetransmission && !hasCryptoData())
- }
+ ack := p.acks.GetAckFrame(encLevel, now, !hasRetransmission && !hasCryptoData())
var pl payload
if !hasCryptoData() && !hasRetransmission && ack == nil {
if !addPingIfEmpty {
@@ -542,13 +537,15 @@ func (p *packetPacker) maybeGetCryptoPacket(
pl.length += ping.Length(v)
}
+ hdr := p.getLongHeader(encLevel, v)
+ maxPacketSize -= hdr.GetLength(v)
+
if ack != nil {
+ ack.Truncate(maxPacketSize, v)
pl.ack = ack
pl.length = ack.Length(v)
maxPacketSize -= pl.length
}
- hdr := p.getLongHeader(encLevel, v)
- maxPacketSize -= hdr.GetLength(v)
if hasRetransmission {
for {
frame := p.retransmissionQueue.GetFrame(encLevel, maxPacketSize, v)
@@ -591,12 +588,12 @@ func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxSize proto
func (p *packetPacker) maybeGetShortHeaderPacket(
sealer handshake.ShortHeaderSealer,
hdrLen, maxPacketSize protocol.ByteCount,
- onlyAck, ackAllowed bool,
+ onlyAck bool,
now monotime.Time,
v protocol.Version,
) payload {
maxPayloadSize := maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead())
- return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, now, v)
+ return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, true, now, v)
}
func (p *packetPacker) maybeGetAppDataPacket(
@@ -635,6 +632,7 @@ func (p *packetPacker) composeNextPacket(
) payload {
if onlyAck {
if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, now, true); ack != nil {
+ ack.Truncate(maxPayloadSize, v)
return payload{ack: ack, length: ack.Length(v)}
}
return payload{}
@@ -643,13 +641,12 @@ func (p *packetPacker) composeNextPacket(
hasData := p.framer.HasData()
hasRetransmission := p.retransmissionQueue.HasData(protocol.Encryption1RTT)
- var hasAck bool
var pl payload
if ackAllowed {
if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, now, !hasRetransmission && !hasData); ack != nil {
+ ack.Truncate(maxPayloadSize, v)
pl.ack = ack
pl.length += ack.Length(v)
- hasAck = true
}
}
@@ -660,7 +657,7 @@ func (p *packetPacker) composeNextPacket(
pl.frames = append(pl.frames, ackhandler.Frame{Frame: f})
pl.length += size
p.datagramQueue.Pop()
- } else if !hasAck {
+ } else if pl.ack == nil {
// The DATAGRAM frame doesn't fit, and the packet doesn't contain an ACK.
// Discard this frame. There's no point in retrying this in the next packet,
// as it's unlikely that the available packet size will increase.
@@ -670,7 +667,7 @@ func (p *packetPacker) composeNextPacket(
}
}
- if hasAck && !hasData && !hasRetransmission {
+ if pl.ack != nil && !hasData && !hasRetransmission {
return pl
}
@@ -747,7 +744,6 @@ func (p *packetPacker) PackPTOProbePacket(
now,
addPingIfEmpty,
false,
- true,
v,
)
if pl.length == 0 {
diff --git a/vendor/github.com/quic-go/quic-go/qlog/event.go b/vendor/github.com/quic-go/quic-go/qlog/event.go
index 77cdff232..83ca71f13 100644
--- a/vendor/github.com/quic-go/quic-go/qlog/event.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/event.go
@@ -214,6 +214,7 @@ func (e ConnectionClosed) Encode(enc *jsontext.Encoder, _ time.Time) error {
type PacketSent struct {
Header PacketHeader
Raw RawInfo
+ DatagramID DatagramID
Frames []Frame
ECN ECN
IsCoalesced bool
@@ -234,6 +235,10 @@ func (e PacketSent) Encode(enc *jsontext.Encoder, _ time.Time) error {
if err := e.Raw.encode(enc); err != nil {
return err
}
+ if e.DatagramID != 0 {
+ h.WriteToken(jsontext.String("datagram_id"))
+ h.WriteToken(jsontext.Uint(uint64(e.DatagramID)))
+ }
if len(e.Frames) > 0 {
h.WriteToken(jsontext.String("frames"))
if err := frames(e.Frames).encode(enc); err != nil {
@@ -259,6 +264,7 @@ func (e PacketSent) Encode(enc *jsontext.Encoder, _ time.Time) error {
type PacketReceived struct {
Header PacketHeader
Raw RawInfo
+ DatagramID DatagramID
Frames []Frame
ECN ECN
IsCoalesced bool
@@ -278,6 +284,10 @@ func (e PacketReceived) Encode(enc *jsontext.Encoder, _ time.Time) error {
if err := e.Raw.encode(enc); err != nil {
return err
}
+ if e.DatagramID != 0 {
+ h.WriteToken(jsontext.String("datagram_id"))
+ h.WriteToken(jsontext.Uint(uint64(e.DatagramID)))
+ }
if len(e.Frames) > 0 {
h.WriteToken(jsontext.String("frames"))
if err := frames(e.Frames).encode(enc); err != nil {
@@ -345,8 +355,9 @@ func (e VersionNegotiationSent) Encode(enc *jsontext.Encoder, _ time.Time) error
}
type PacketBuffered struct {
- Header PacketHeader
- Raw RawInfo
+ Header PacketHeader
+ Raw RawInfo
+ DatagramID DatagramID
}
func (e PacketBuffered) Name() string { return "transport:packet_buffered" }
@@ -362,6 +373,10 @@ func (e PacketBuffered) Encode(enc *jsontext.Encoder, _ time.Time) error {
if err := e.Raw.encode(enc); err != nil {
return err
}
+ if e.DatagramID != 0 {
+ h.WriteToken(jsontext.String("datagram_id"))
+ h.WriteToken(jsontext.Uint(uint64(e.DatagramID)))
+ }
h.WriteToken(jsontext.String("trigger"))
h.WriteToken(jsontext.String("keys_unavailable"))
h.WriteToken(jsontext.EndObject)
@@ -370,9 +385,10 @@ func (e PacketBuffered) Encode(enc *jsontext.Encoder, _ time.Time) error {
// PacketDropped is the transport:packet_dropped event.
type PacketDropped struct {
- Header PacketHeader
- Raw RawInfo
- Trigger PacketDropReason
+ Header PacketHeader
+ Raw RawInfo
+ DatagramID DatagramID
+ Trigger PacketDropReason
}
func (e PacketDropped) Name() string { return "transport:packet_dropped" }
@@ -388,6 +404,10 @@ func (e PacketDropped) Encode(enc *jsontext.Encoder, _ time.Time) error {
if err := e.Raw.encode(enc); err != nil {
return err
}
+ if e.DatagramID != 0 {
+ h.WriteToken(jsontext.String("datagram_id"))
+ h.WriteToken(jsontext.Uint(uint64(e.DatagramID)))
+ }
h.WriteToken(jsontext.String("trigger"))
h.WriteToken(jsontext.String(string(e.Trigger)))
h.WriteToken(jsontext.EndObject)
@@ -805,3 +825,25 @@ func (e ALPNInformation) Encode(enc *jsontext.Encoder, _ time.Time) error {
h.WriteToken(jsontext.EndObject)
return h.err
}
+
+// DebugEvent is a generic event that can be used to log arbitrary messages.
+type DebugEvent struct {
+ EventName string
+ Message string
+}
+
+func (e DebugEvent) Name() string {
+ if e.EventName == "" {
+ return "transport:debug"
+ }
+ return fmt.Sprintf("transport:%s", e.EventName)
+}
+
+func (e DebugEvent) Encode(enc *jsontext.Encoder, _ time.Time) error {
+ h := encoderHelper{enc: enc}
+ h.WriteToken(jsontext.BeginObject)
+ h.WriteToken(jsontext.String("message"))
+ h.WriteToken(jsontext.String(e.Message))
+ h.WriteToken(jsontext.EndObject)
+ return h.err
+}
diff --git a/vendor/github.com/quic-go/quic-go/qlog/types.go b/vendor/github.com/quic-go/quic-go/qlog/types.go
index 4cd2bb64c..dfa4066d2 100644
--- a/vendor/github.com/quic-go/quic-go/qlog/types.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/types.go
@@ -2,6 +2,7 @@ package qlog
import (
"fmt"
+ "hash/crc32"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
@@ -293,3 +294,11 @@ const (
// StatelessReset indicates the connection was closed due to receiving a stateless reset from the peer
ConnectionCloseTriggerStatelessReset ConnectionCloseTrigger = "stateless_reset"
)
+
+// DatagramID is a unique identifier for a datagram
+type DatagramID uint32
+
+// CalculateDatagramID computes a DatagramID for a given packet
+func CalculateDatagramID(packet []byte) DatagramID {
+ return DatagramID(crc32.ChecksumIEEE(packet))
+}
diff --git a/vendor/github.com/quic-go/quic-go/qlogwriter/writer.go b/vendor/github.com/quic-go/quic-go/qlogwriter/writer.go
index c2921d91d..d728f596f 100644
--- a/vendor/github.com/quic-go/quic-go/qlogwriter/writer.go
+++ b/vendor/github.com/quic-go/quic-go/qlogwriter/writer.go
@@ -28,9 +28,11 @@ type Trace interface {
// It is safe for concurrent use by multiple goroutines.
type Recorder interface {
// RecordEvent records a single Event to the trace.
+ // It must not be called after Close.
RecordEvent(Event)
// Close signals that this producer is done recording events.
// When all producers are closed, the underlying trace is closed.
+ // It must not be called concurrently with RecordEvent.
io.Closer
}
@@ -67,6 +69,7 @@ type FileSeq struct {
runStopped chan struct{}
encodeErr error
events chan event
+ done chan struct{}
mx sync.Mutex
producers int
@@ -115,6 +118,7 @@ func newFileSeq(w io.WriteCloser, pers string, odcid *ConnectionID, eventSchemas
runStopped: make(chan struct{}),
encodeErr: encodeErr,
events: make(chan event, eventChanSize),
+ done: make(chan struct{}),
eventSchemas: eventSchemas,
}
}
@@ -150,55 +154,64 @@ func (t *FileSeq) record(eventTime time.Time, details Event) {
func (t *FileSeq) Run() {
defer close(t.runStopped)
- enc := jsontext.NewEncoder(t.w)
- for e := range t.events {
- if t.encodeErr != nil { // if encoding failed, just continue draining the event channel
- continue
- }
- if _, err := t.w.Write(recordSeparator); err != nil {
- t.encodeErr = err
- continue
+ for {
+ select {
+ case <-t.done:
+ for {
+ select {
+ case e := <-t.events:
+ t.encodeEvent(e)
+ default:
+ if t.encodeErr != nil {
+ log.Printf("exporting qlog failed: %s\n", t.encodeErr)
+ }
+ return
+ }
+ }
+ case e := <-t.events:
+ t.encodeEvent(e)
}
+ }
+}
- h := encoderHelper{enc: enc}
- h.WriteToken(jsontext.BeginObject)
- h.WriteToken(jsontext.String("time"))
- h.WriteToken(jsontext.Float(float64(e.Time.Sub(t.referenceTime).Nanoseconds()) / 1e6))
- h.WriteToken(jsontext.String("name"))
- h.WriteToken(jsontext.String(e.Event.Name()))
- h.WriteToken(jsontext.String("data"))
- if err := e.Event.Encode(enc, e.Time); err != nil {
- t.encodeErr = err
- continue
- }
- h.WriteToken(jsontext.EndObject)
- if h.err != nil {
- t.encodeErr = h.err
- }
+func (t *FileSeq) encodeEvent(e event) {
+ if t.encodeErr != nil {
+ return
+ }
+ if _, err := t.w.Write(recordSeparator); err != nil {
+ t.encodeErr = err
+ return
+ }
+ h := encoderHelper{enc: t.enc}
+ h.WriteToken(jsontext.BeginObject)
+ h.WriteToken(jsontext.String("time"))
+ h.WriteToken(jsontext.Float(float64(e.Time.Sub(t.referenceTime).Nanoseconds()) / 1e6))
+ h.WriteToken(jsontext.String("name"))
+ h.WriteToken(jsontext.String(e.Event.Name()))
+ h.WriteToken(jsontext.String("data"))
+ if err := e.Event.Encode(t.enc, e.Time); err != nil {
+ t.encodeErr = err
+ return
+ }
+ h.WriteToken(jsontext.EndObject)
+ if h.err != nil {
+ t.encodeErr = h.err
}
}
func (t *FileSeq) removeProducer() {
t.mx.Lock()
- defer t.mx.Unlock()
-
- if t.closed {
- return
- }
t.producers--
- if t.producers == 0 {
+ last := t.producers == 0
+ if last {
t.closed = true
- t.close()
- t.w.Close()
}
-}
+ t.mx.Unlock()
-func (t *FileSeq) close() {
- close(t.events)
- <-t.runStopped
- if t.encodeErr != nil {
- log.Printf("exporting qlog failed: %s\n", t.encodeErr)
- return
+ if last {
+ close(t.done)
+ <-t.runStopped // wait for Run to drain and exit
+ _ = t.w.Close()
}
}
diff --git a/vendor/github.com/quic-go/quic-go/quicvarint/io.go b/vendor/github.com/quic-go/quic-go/quicvarint/io.go
index 5c3453645..8ea10acda 100644
--- a/vendor/github.com/quic-go/quic-go/quicvarint/io.go
+++ b/vendor/github.com/quic-go/quic-go/quicvarint/io.go
@@ -13,6 +13,31 @@ type Reader interface {
var _ Reader = &bytes.Reader{}
+// A Peeker can peek bytes without consuming them.
+type Peeker interface {
+ Peek(b []byte) (int, error)
+}
+
+// Peek reads a number in the QUIC varint format without consuming bytes.
+func Peek(p Peeker) (uint64, error) {
+ var b [8]byte
+
+ // first peek 1 byte to determine the varint length
+ if _, err := p.Peek(b[:1]); err != nil {
+ return 0, err
+ }
+
+ l := 1 << (b[0] >> 6) // 1, 2, 4, or 8 bytes
+ if l == 1 {
+ return uint64(b[0] & 0b00111111), nil
+ }
+ if _, err := p.Peek(b[:l]); err != nil {
+ return 0, err
+ }
+ val, _, err := Parse(b[:l])
+ return val, err
+}
+
type byteReader struct {
io.Reader
}
@@ -58,7 +83,7 @@ type byteWriter struct {
var _ Writer = &byteWriter{}
// NewWriter returns a Writer for w.
-// If r already implements both io.ByteWriter and io.Writer, NewWriter returns w.
+// If w already implements both io.ByteWriter and io.Writer, NewWriter returns w.
// Otherwise, w is wrapped to add the missing interfaces.
func NewWriter(w io.Writer) Writer {
if w, ok := w.(Writer); ok {
diff --git a/vendor/github.com/quic-go/quic-go/receive_stream.go b/vendor/github.com/quic-go/quic-go/receive_stream.go
index b875ecded..10a8777f4 100644
--- a/vendor/github.com/quic-go/quic-go/receive_stream.go
+++ b/vendor/github.com/quic-go/quic-go/receive_stream.go
@@ -131,7 +131,7 @@ func (s *ReceiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnW
s.errorRead = true
return false, false, 0, io.EOF
}
- if s.cancelledLocally || (s.cancelledRemotely && s.readPos >= s.reliableSize) {
+ if s.cancelledLocally || s.isRemoteCancellationEffective() {
s.errorRead = true
return false, false, 0, s.cancelErr
}
@@ -154,22 +154,14 @@ func (s *ReceiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnW
if s.closeForShutdownErr != nil {
return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, s.closeForShutdownErr
}
- if s.cancelledLocally || (s.cancelledRemotely && s.readPos >= s.reliableSize) {
+ if s.cancelledLocally || s.isRemoteCancellationEffective() {
s.errorRead = true
return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, s.cancelErr
}
deadline := s.deadline
- if !deadline.IsZero() {
- if !monotime.Now().Before(deadline) {
- return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, errDeadline
- }
- if deadlineTimer == nil {
- deadlineTimer = time.NewTimer(monotime.Until(deadline))
- defer deadlineTimer.Stop()
- } else {
- deadlineTimer.Reset(monotime.Until(deadline))
- }
+ if !deadline.IsZero() && !monotime.Now().Before(deadline) {
+ return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, errDeadline
}
if s.currentFrame != nil || s.currentFrameIsLast {
@@ -180,15 +172,19 @@ func (s *ReceiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnW
if deadline.IsZero() {
<-s.readChan
} else {
+ if deadlineTimer == nil {
+ deadlineTimer = time.NewTimer(monotime.Until(deadline))
+ defer deadlineTimer.Stop()
+ } else {
+ deadlineTimer.Reset(monotime.Until(deadline))
+ }
select {
case <-s.readChan:
case <-deadlineTimer.C:
}
}
s.mutex.Lock()
- if s.currentFrame == nil {
- s.dequeueNextFrame()
- }
+ s.dequeueNextFrame()
}
if bytesRead > len(p) {
@@ -201,7 +197,7 @@ func (s *ReceiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnW
// when a RESET_STREAM was received, the flow controller was already
// informed about the final offset for this stream
- if !s.cancelledRemotely || s.readPos < s.reliableSize {
+ if !s.isRemoteCancellationEffective() {
hasStream, hasConn := s.flowController.AddBytesRead(protocol.ByteCount(m))
if hasStream {
s.queuedMaxStreamData = true
@@ -216,7 +212,7 @@ func (s *ReceiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnW
s.readPos += protocol.ByteCount(m)
bytesRead += m
- if s.cancelledRemotely && s.readPos >= s.reliableSize {
+ if s.isRemoteCancellationEffective() {
s.flowController.Abandon()
}
@@ -229,13 +225,134 @@ func (s *ReceiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnW
return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, io.EOF
}
}
- if s.cancelledRemotely && s.readPos >= s.reliableSize {
+ if s.isRemoteCancellationEffective() {
s.errorRead = true
return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, s.cancelErr
}
return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, nil
}
+// isRemoteCancellationEffective returns whether the stream was cancelled remotely
+// and all reliable data has been read.
+func (s *ReceiveStream) isRemoteCancellationEffective() bool {
+ return s.cancelledRemotely && s.readPos >= s.reliableSize
+}
+
+// Peek fills b with stream data, without consuming the stream data.
+// It blocks until len(b) bytes are available, or an error occurs.
+// It respects the stream deadline set by SetReadDeadline.
+// If the stream ends before len(b) bytes are available,
+// it returns the number of bytes peeked along with io.EOF.
+func (s *ReceiveStream) Peek(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+
+ // prevent concurrent use with Read
+ s.readOnce <- struct{}{}
+ defer func() { <-s.readOnce }()
+
+ return s.peekImpl(b)
+}
+
+func (s *ReceiveStream) peekImpl(b []byte) (int, error) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ var deadlineTimer *time.Timer
+
+ for {
+ if s.currentFrameIsLast && s.currentFrame == nil {
+ return 0, io.EOF
+ }
+ if s.cancelledLocally || s.isRemoteCancellationEffective() {
+ return 0, s.cancelErr
+ }
+ if s.closeForShutdownErr != nil {
+ return 0, s.closeForShutdownErr
+ }
+
+ deadline := s.deadline
+ if !deadline.IsZero() && !monotime.Now().Before(deadline) {
+ return 0, errDeadline
+ }
+
+ if s.currentFrame == nil || s.readPosInFrame >= len(s.currentFrame) {
+ s.dequeueNextFrame()
+ }
+
+ if s.currentFrame != nil && s.readPosInFrame < len(s.currentFrame) {
+ availableInCurrentFrame := len(s.currentFrame) - s.readPosInFrame
+
+ if availableInCurrentFrame >= len(b) {
+ copy(b, s.currentFrame[s.readPosInFrame:])
+ return len(b), nil
+ }
+
+ offset := s.readPos + protocol.ByteCount(availableInCurrentFrame)
+ // First peek, then copy.
+ // This avoids copying data if there's not enough data in the queue.
+ if err := s.frameQueue.Peek(offset, b[availableInCurrentFrame:]); err == nil {
+ copy(b[:availableInCurrentFrame], s.currentFrame[s.readPosInFrame:])
+ return len(b), nil
+ }
+
+ if s.currentFrameIsLast {
+ copy(b[:availableInCurrentFrame], s.currentFrame[s.readPosInFrame:])
+ return availableInCurrentFrame, io.EOF
+ }
+
+ // If the stream was remotely cancelled and the request extends beyond the reliable size,
+ // return the data available with the cancel error (once it's all received).
+ if s.cancelledRemotely && s.readPos+protocol.ByteCount(len(b)) > s.reliableSize {
+ total := int(s.reliableSize - s.readPos)
+ needed := total - availableInCurrentFrame
+ // only return once all available data is contiguous
+ if needed <= 0 || s.frameQueue.Peek(offset, b[availableInCurrentFrame:total]) == nil {
+ copy(b[:availableInCurrentFrame], s.currentFrame[s.readPosInFrame:])
+ return total, s.cancelErr
+ }
+ }
+
+ // If the request extends beyond the stream's final offset,
+ // return the data available with EOF (once it's all received).
+ if s.readPos+protocol.ByteCount(len(b)) > s.finalOffset {
+ total := int(s.finalOffset - s.readPos)
+ needed := total - availableInCurrentFrame
+ // only return once all available data is contiguous
+ if needed <= 0 || s.frameQueue.Peek(offset, b[availableInCurrentFrame:total]) == nil {
+ copy(b[:availableInCurrentFrame], s.currentFrame[s.readPosInFrame:])
+ return total, io.EOF
+ }
+ }
+ }
+
+ if s.currentFrameIsLast || s.readPos >= s.finalOffset {
+ return 0, io.EOF
+ }
+
+ s.mutex.Unlock()
+ if deadline.IsZero() {
+ <-s.readChan
+ } else {
+ if deadlineTimer == nil {
+ deadlineTimer = time.NewTimer(monotime.Until(deadline))
+ defer deadlineTimer.Stop()
+ } else {
+ deadlineTimer.Reset(monotime.Until(deadline))
+ }
+ select {
+ case <-s.readChan:
+ case <-deadlineTimer.C:
+ }
+ }
+ s.mutex.Lock()
+ if s.currentFrame == nil || s.readPosInFrame >= len(s.currentFrame) {
+ s.dequeueNextFrame()
+ }
+ }
+}
+
func (s *ReceiveStream) dequeueNextFrame() {
var offset protocol.ByteCount
// We're done with the last frame. Release the buffer.
diff --git a/vendor/github.com/quic-go/quic-go/server.go b/vendor/github.com/quic-go/quic-go/server.go
index a632ba342..7bbf930d1 100644
--- a/vendor/github.com/quic-go/quic-go/server.go
+++ b/vendor/github.com/quic-go/quic-go/server.go
@@ -800,7 +800,6 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error
} else {
cancel = cancel1
}
- ctx = context.WithValue(ctx, ConnectionTracingKey, nextConnTracingID())
var qlogTrace qlogwriter.Trace
if config.Tracer != nil {
// Use the same connection ID that is passed to the client's GetLogWriter callback.
diff --git a/vendor/github.com/quic-go/quic-go/stream.go b/vendor/github.com/quic-go/quic-go/stream.go
index 7b5b8d09c..7248f76e2 100644
--- a/vendor/github.com/quic-go/quic-go/stream.go
+++ b/vendor/github.com/quic-go/quic-go/stream.go
@@ -117,6 +117,15 @@ func (s *Stream) Read(p []byte) (int, error) {
return s.receiveStr.Read(p)
}
+// Peek fills b with stream data, without consuming the stream data.
+// It blocks until len(b) bytes are available, or an error occurs.
+// It respects the stream deadline set by SetReadDeadline.
+// If the stream ends before len(b) bytes are available,
+// it returns the number of bytes peeked along with io.EOF.
+func (s *Stream) Peek(b []byte) (int, error) {
+ return s.receiveStr.Peek(b)
+}
+
// Write writes data to the stream.
// Write can be made to time out using [Stream.SetWriteDeadline] or [Stream.SetDeadline].
// If the stream was canceled, the error is a [StreamError].
@@ -124,6 +133,14 @@ func (s *Stream) Write(p []byte) (int, error) {
return s.sendStr.Write(p)
}
+// SetReliableBoundary marks the data written to this stream so far as reliable.
+// It is valid to call this function multiple times, thereby increasing the reliable size.
+// It only has an effect if the peer enabled support for the RESET_STREAM_AT extension,
+// otherwise, it is a no-op.
+func (s *Stream) SetReliableBoundary() {
+ s.sendStr.SetReliableBoundary()
+}
+
// CancelWrite aborts sending on this stream.
// See [SendStream.CancelWrite] for more details.
func (s *Stream) CancelWrite(errorCode StreamErrorCode) {
diff --git a/vendor/github.com/quic-go/quic-go/transport.go b/vendor/github.com/quic-go/quic-go/transport.go
index 891ee21b7..740c9b53b 100644
--- a/vendor/github.com/quic-go/quic-go/transport.go
+++ b/vendor/github.com/quic-go/quic-go/transport.go
@@ -283,9 +283,6 @@ func (t *Transport) doDial(
return nil, err
}
- tracingID := nextConnTracingID()
- ctx = context.WithValue(ctx, ConnectionTracingKey, tracingID)
-
t.mutex.Lock()
if t.closeErr != nil {
t.mutex.Unlock()
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go
similarity index 89%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go
index 239adaf6c..dbf368d3d 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"encoding/hex"
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go
deleted file mode 100644
index 72b5f5752..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
- "encoding/hex"
- "errors"
-
- "github.com/gogo/protobuf/proto"
-
- "go.opentelemetry.io/collector/pdata/internal/json"
-)
-
-const profileIDSize = 16
-
-var (
- errMarshalProfileID = errors.New("marshal: invalid buffer length for ProfileID")
- errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
-)
-
-// ProfileID is a custom data type that is used for all profile_id fields in OTLP
-// Protobuf messages.
-type ProfileID [profileIDSize]byte
-
-var _ proto.Sizer = (*ProfileID)(nil)
-
-// Size returns the size of the data to serialize.
-func (tid ProfileID) Size() int {
- if tid.IsEmpty() {
- return 0
- }
- return profileIDSize
-}
-
-// IsEmpty returns true if id contains at leas one non-zero byte.
-func (tid ProfileID) IsEmpty() bool {
- return tid == [profileIDSize]byte{}
-}
-
-// MarshalTo converts profile ID into a binary representation. Called by Protobuf serialization.
-func (tid ProfileID) MarshalTo(data []byte) (n int, err error) {
- if tid.IsEmpty() {
- return 0, nil
- }
-
- if len(data) < profileIDSize {
- return 0, errMarshalProfileID
- }
-
- return copy(data, tid[:]), nil
-}
-
-// Unmarshal inflates this profile ID from binary representation. Called by Protobuf serialization.
-func (tid *ProfileID) Unmarshal(data []byte) error {
- if len(data) == 0 {
- *tid = [profileIDSize]byte{}
- return nil
- }
-
- if len(data) != profileIDSize {
- return errUnmarshalProfileID
- }
-
- copy(tid[:], data)
- return nil
-}
-
-// MarshalJSONStream converts ProfileID into a hex string.
-func (tid ProfileID) MarshalJSONStream(dest *json.Stream) {
- dest.WriteString(hex.EncodeToString(tid[:]))
-}
-
-// UnmarshalJSONIter decodes ProfileID from hex string.
-func (tid *ProfileID) UnmarshalJSONIter(iter *json.Iterator) {
- *tid = [profileIDSize]byte{}
- unmarshalJSON(tid[:], iter)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go
deleted file mode 100644
index 53b69b072..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go
+++ /dev/null
@@ -1,840 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/logs/v1/logs_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportLogsServiceRequest struct {
- // An array of ResourceLogs.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
-}
-
-func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} }
-func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportLogsServiceRequest) ProtoMessage() {}
-func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_8e3bf87aaa43acd4, []int{0}
-}
-func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src)
-}
-func (m *ExportLogsServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo
-
-func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs {
- if m != nil {
- return m.ResourceLogs
- }
- return nil
-}
-
-type ExportLogsServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_` with the number of items it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_ = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportLogsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} }
-func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportLogsServiceResponse) ProtoMessage() {}
-func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_8e3bf87aaa43acd4, []int{1}
-}
-func (m *ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src)
-}
-func (m *ExportLogsServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo
-
-func (m *ExportLogsServiceResponse) GetPartialSuccess() ExportLogsPartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportLogsPartialSuccess{}
-}
-
-type ExportLogsPartialSuccess struct {
- // The number of rejected log records.
- //
- // A `rejected_` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedLogRecords int64 `protobuf:"varint,1,opt,name=rejected_log_records,json=rejectedLogRecords,proto3" json:"rejected_log_records,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportLogsPartialSuccess) Reset() { *m = ExportLogsPartialSuccess{} }
-func (m *ExportLogsPartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportLogsPartialSuccess) ProtoMessage() {}
-func (*ExportLogsPartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_8e3bf87aaa43acd4, []int{2}
-}
-func (m *ExportLogsPartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportLogsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportLogsPartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportLogsPartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportLogsPartialSuccess.Merge(m, src)
-}
-func (m *ExportLogsPartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportLogsPartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportLogsPartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportLogsPartialSuccess proto.InternalMessageInfo
-
-func (m *ExportLogsPartialSuccess) GetRejectedLogRecords() int64 {
- if m != nil {
- return m.RejectedLogRecords
- }
- return 0
-}
-
-func (m *ExportLogsPartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest")
- proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse")
- proto.RegisterType((*ExportLogsPartialSuccess)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4)
-}
-
-var fileDescriptor_8e3bf87aaa43acd4 = []byte{
- // 430 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xc1, 0x6e, 0x13, 0x31,
- 0x10, 0x86, 0xd7, 0x2d, 0xaa, 0x84, 0xd3, 0x02, 0xb2, 0x7a, 0x08, 0x39, 0x2c, 0x55, 0x50, 0x51,
- 0xb8, 0x78, 0x49, 0xb8, 0x70, 0x03, 0x05, 0x71, 0x0b, 0x10, 0x6d, 0x11, 0x07, 0x2e, 0xab, 0xc5,
- 0x19, 0x59, 0x5b, 0x6d, 0x77, 0xdc, 0xb1, 0x13, 0xc1, 0x33, 0x20, 0x24, 0x5e, 0x80, 0x17, 0xe0,
- 0x49, 0x7a, 0xe0, 0xd0, 0x23, 0x27, 0x84, 0x92, 0x17, 0x41, 0x5e, 0x97, 0xb0, 0x0b, 0x39, 0x04,
- 0x4e, 0xbb, 0x1e, 0xcf, 0xff, 0xfd, 0xff, 0xd8, 0x32, 0x7f, 0x84, 0x06, 0x2a, 0x07, 0x25, 0x9c,
- 0x81, 0xa3, 0xf7, 0x89, 0x21, 0x74, 0x98, 0x28, 0x2c, 0x4b, 0x50, 0x0e, 0x29, 0x29, 0x51, 0xdb,
- 0x64, 0x31, 0xac, 0xbf, 0x99, 0x05, 0x5a, 0x14, 0x0a, 0x64, 0xdd, 0x24, 0x8e, 0x5b, 0xca, 0x50,
- 0x94, 0x6b, 0xa5, 0xf4, 0x0a, 0xb9, 0x18, 0xf6, 0x0e, 0x35, 0x6a, 0x0c, 0x58, 0xff, 0x17, 0xfa,
- 0x7a, 0xf7, 0x36, 0xd9, 0x36, 0xcd, 0x42, 0x5f, 0xff, 0x94, 0x77, 0x9f, 0xbd, 0x33, 0x48, 0x6e,
- 0x82, 0xda, 0x9e, 0x04, 0xff, 0x14, 0xce, 0xe7, 0x60, 0x9d, 0x78, 0xc1, 0x0f, 0x08, 0x2c, 0xce,
- 0x49, 0x41, 0xe6, 0x25, 0x5d, 0x76, 0xb4, 0x3b, 0xe8, 0x8c, 0xee, 0xcb, 0x4d, 0xc1, 0xae, 0xe2,
- 0xc8, 0xf4, 0x4a, 0xe1, 0x79, 0xe9, 0x3e, 0x35, 0x56, 0xfd, 0x0f, 0x8c, 0xdf, 0xde, 0x60, 0x66,
- 0x0d, 0x56, 0x16, 0x44, 0xc5, 0x6f, 0x9a, 0x9c, 0x5c, 0x91, 0x97, 0x99, 0x9d, 0x2b, 0x05, 0xd6,
- 0xfb, 0xb1, 0x41, 0x67, 0xf4, 0x58, 0x6e, 0x75, 0x10, 0xf2, 0x37, 0x7a, 0x1a, 0x38, 0x27, 0x01,
- 0x33, 0xbe, 0x76, 0xf1, 0xfd, 0x4e, 0x94, 0xde, 0x30, 0xad, 0x6a, 0xff, 0xbc, 0x39, 0x79, 0x5b,
- 0x21, 0x1e, 0xf0, 0x43, 0x82, 0x53, 0x50, 0x0e, 0x66, 0x7e, 0xf2, 0x8c, 0x40, 0x21, 0xcd, 0x42,
- 0xa0, 0xdd, 0x54, 0xfc, 0xda, 0x9b, 0xa0, 0x4e, 0xc3, 0x8e, 0xb8, 0xcb, 0x0f, 0x80, 0x08, 0x29,
- 0x3b, 0x03, 0x6b, 0x73, 0x0d, 0xdd, 0x9d, 0x23, 0x36, 0xb8, 0x9e, 0xee, 0xd7, 0xc5, 0xe7, 0xa1,
- 0x36, 0xfa, 0xcc, 0x78, 0xa7, 0x31, 0xba, 0xf8, 0xc8, 0xf8, 0x5e, 0xc8, 0x20, 0xfe, 0x7d, 0xc8,
- 0xf6, 0x65, 0xf5, 0x9e, 0xfc, 0x3f, 0x20, 0x5c, 0x40, 0x3f, 0x1a, 0x7f, 0x65, 0x17, 0xcb, 0x98,
- 0x5d, 0x2e, 0x63, 0xf6, 0x63, 0x19, 0xb3, 0x4f, 0xab, 0x38, 0xba, 0x5c, 0xc5, 0xd1, 0xb7, 0x55,
- 0x1c, 0xf1, 0x41, 0x81, 0xdb, 0x19, 0x8c, 0x6f, 0x35, 0xd8, 0x53, 0xdf, 0x33, 0x65, 0x6f, 0x26,
- 0xfa, 0x4f, 0x75, 0xd1, 0x7c, 0x04, 0x66, 0x96, 0xbb, 0x3c, 0x29, 0x2a, 0x07, 0x54, 0xe5, 0x65,
- 0x52, 0xaf, 0x6a, 0xbc, 0x86, 0xea, 0xef, 0xb7, 0xf2, 0x65, 0xe7, 0xf8, 0xa5, 0x81, 0xea, 0xd5,
- 0x9a, 0x55, 0xbb, 0xc8, 0xa7, 0xeb, 0x24, 0x3e, 0x80, 0x7c, 0x3d, 0x7c, 0xbb, 0x57, 0x33, 0x1e,
- 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xaf, 0x6c, 0x7d, 0x83, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// LogsServiceClient is the client API for LogsService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type LogsServiceClient interface {
- Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error)
-}
-
-type logsServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient {
- return &logsServiceClient{cc}
-}
-
-func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) {
- out := new(ExportLogsServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// LogsServiceServer is the server API for LogsService service.
-type LogsServiceServer interface {
- Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error)
-}
-
-// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedLogsServiceServer struct {
-}
-
-func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) {
- s.RegisterService(&_LogsService_serviceDesc, srv)
-}
-
-func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportLogsServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LogsServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _LogsService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService",
- HandlerType: (*LogsServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _LogsService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto",
-}
-
-func (m *ExportLogsServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportLogsServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportLogsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportLogsServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportLogsServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportLogsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportLogsPartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportLogsPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportLogsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintLogsService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedLogRecords != 0 {
- i = encodeVarintLogsService(dAtA, i, uint64(m.RejectedLogRecords))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintLogsService(dAtA []byte, offset int, v uint64) int {
- offset -= sovLogsService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ExportLogsServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for _, e := range m.ResourceLogs {
- l = e.Size()
- n += 1 + l + sovLogsService(uint64(l))
- }
- }
- return n
-}
-
-func (m *ExportLogsServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovLogsService(uint64(l))
- return n
-}
-
-func (m *ExportLogsPartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedLogRecords != 0 {
- n += 1 + sovLogsService(uint64(m.RejectedLogRecords))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovLogsService(uint64(l))
- }
- return n
-}
-
-func sovLogsService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozLogsService(x uint64) (n int) {
- return sovLogsService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportLogsServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportLogsServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportLogsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceLogs = append(m.ResourceLogs, &v1.ResourceLogs{})
- if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportLogsServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportLogsServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportLogsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportLogsPartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportLogsPartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportLogsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
- }
- m.RejectedLogRecords = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedLogRecords |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogsService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipLogsService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthLogsService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupLogsService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLogsService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthLogsService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowLogsService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupLogsService = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go
deleted file mode 100644
index bfdc29395..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go
+++ /dev/null
@@ -1,840 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportMetricsServiceRequest struct {
- // An array of ResourceMetrics.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
-}
-
-func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
-func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceRequest) ProtoMessage() {}
-func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_75fb6015e6e64798, []int{0}
-}
-func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
-}
-func (m *ExportMetricsServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
-
-func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics {
- if m != nil {
- return m.ResourceMetrics
- }
- return nil
-}
-
-type ExportMetricsServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_` with the number of items it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_ = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
-func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceResponse) ProtoMessage() {}
-func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_75fb6015e6e64798, []int{1}
-}
-func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
-}
-func (m *ExportMetricsServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
-
-func (m *ExportMetricsServiceResponse) GetPartialSuccess() ExportMetricsPartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportMetricsPartialSuccess{}
-}
-
-type ExportMetricsPartialSuccess struct {
- // The number of rejected data points.
- //
- // A `rejected_` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportMetricsPartialSuccess) Reset() { *m = ExportMetricsPartialSuccess{} }
-func (m *ExportMetricsPartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsPartialSuccess) ProtoMessage() {}
-func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_75fb6015e6e64798, []int{2}
-}
-func (m *ExportMetricsPartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportMetricsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportMetricsPartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportMetricsPartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsPartialSuccess.Merge(m, src)
-}
-func (m *ExportMetricsPartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportMetricsPartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsPartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsPartialSuccess proto.InternalMessageInfo
-
-func (m *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 {
- if m != nil {
- return m.RejectedDataPoints
- }
- return 0
-}
-
-func (m *ExportMetricsPartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest")
- proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse")
- proto.RegisterType((*ExportMetricsPartialSuccess)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798)
-}
-
-var fileDescriptor_75fb6015e6e64798 = []byte{
- // 427 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xbf, 0x8e, 0xd3, 0x30,
- 0x18, 0x8f, 0xef, 0xd0, 0x49, 0xf8, 0xe0, 0x0e, 0x99, 0x1b, 0x4e, 0x05, 0x85, 0x53, 0x58, 0x22,
- 0x81, 0x1c, 0x5a, 0x76, 0x86, 0xc2, 0xb1, 0x9d, 0x1a, 0xa5, 0x88, 0xa1, 0x4b, 0x64, 0xdc, 0x4f,
- 0x51, 0x50, 0x1a, 0x1b, 0xdb, 0xad, 0xe8, 0x5b, 0x30, 0xb0, 0xf0, 0x0a, 0x88, 0x07, 0xe9, 0xd8,
- 0xb1, 0x13, 0x42, 0xed, 0x8b, 0xa0, 0xc4, 0x69, 0xc1, 0x25, 0x43, 0xc5, 0x6d, 0xce, 0xcf, 0xdf,
- 0xef, 0x4f, 0x7e, 0xd6, 0x87, 0x5f, 0x09, 0x09, 0xa5, 0x81, 0x02, 0x26, 0x60, 0xd4, 0x3c, 0x92,
- 0x4a, 0x18, 0x11, 0x71, 0x51, 0x14, 0xc0, 0x8d, 0x50, 0x51, 0x85, 0xe6, 0x5c, 0x47, 0xb3, 0xee,
- 0xf6, 0x98, 0x6a, 0x50, 0xb3, 0x9c, 0x03, 0xad, 0x47, 0x49, 0xe8, 0xf0, 0x2d, 0x48, 0x77, 0x7c,
- 0xda, 0x90, 0xe8, 0xac, 0xdb, 0xb9, 0xc8, 0x44, 0x26, 0xac, 0x7e, 0x75, 0xb2, 0xa3, 0x9d, 0xe7,
- 0x6d, 0xfe, 0xff, 0xba, 0xda, 0xe9, 0x60, 0x8e, 0x1f, 0x5d, 0x7f, 0x96, 0x42, 0x99, 0x1b, 0x0b,
- 0x0f, 0x6d, 0x96, 0x04, 0x3e, 0x4d, 0x41, 0x1b, 0x32, 0xc2, 0x0f, 0x14, 0x68, 0x31, 0x55, 0x1c,
- 0xd2, 0x86, 0x78, 0x89, 0xae, 0x8e, 0xc3, 0xd3, 0x5e, 0x44, 0xdb, 0x72, 0xfe, 0x49, 0x47, 0x93,
- 0x86, 0xd7, 0x08, 0x27, 0xe7, 0xca, 0x05, 0x82, 0xaf, 0x08, 0x3f, 0x6e, 0xf7, 0xd6, 0x52, 0x94,
- 0x1a, 0x88, 0xc1, 0xe7, 0x92, 0x29, 0x93, 0xb3, 0x22, 0xd5, 0x53, 0xce, 0x41, 0x57, 0xde, 0x28,
- 0x3c, 0xed, 0x5d, 0xd3, 0x43, 0x3b, 0xa2, 0x8e, 0x41, 0x6c, 0xd5, 0x86, 0x56, 0xac, 0x7f, 0x67,
- 0xf1, 0xf3, 0x89, 0x97, 0x9c, 0x49, 0x07, 0x0d, 0xcc, 0x5e, 0x23, 0x2e, 0x89, 0xbc, 0xc0, 0x17,
- 0x0a, 0x3e, 0x02, 0x37, 0x30, 0x4e, 0xc7, 0xcc, 0xb0, 0x54, 0x8a, 0xbc, 0x34, 0x36, 0xd9, 0x71,
- 0x42, 0xb6, 0x77, 0x6f, 0x98, 0x61, 0x71, 0x7d, 0x43, 0x9e, 0xe2, 0xfb, 0xa0, 0x94, 0x50, 0xe9,
- 0x04, 0xb4, 0x66, 0x19, 0x5c, 0x1e, 0x5d, 0xa1, 0xf0, 0x6e, 0x72, 0xaf, 0x06, 0x6f, 0x2c, 0xd6,
- 0xfb, 0x81, 0xf0, 0x99, 0x5b, 0x03, 0xf9, 0x86, 0xf0, 0x89, 0x4d, 0x42, 0xfe, 0xf7, 0x87, 0xdd,
- 0xd7, 0xec, 0xbc, 0xbd, 0xad, 0x8c, 0x7d, 0x98, 0xc0, 0xeb, 0xaf, 0xd0, 0x62, 0xed, 0xa3, 0xe5,
- 0xda, 0x47, 0xbf, 0xd6, 0x3e, 0xfa, 0xb2, 0xf1, 0xbd, 0xe5, 0xc6, 0xf7, 0x56, 0x1b, 0xdf, 0xc3,
- 0xcf, 0x72, 0x71, 0xb0, 0x4d, 0xff, 0xa1, 0xeb, 0x10, 0x57, 0x93, 0x31, 0x1a, 0x0d, 0xb2, 0x7d,
- 0x8d, 0xfc, 0xef, 0x1d, 0x92, 0x55, 0xf1, 0x51, 0x5e, 0x1a, 0x50, 0x25, 0x2b, 0xa2, 0xfa, 0xab,
- 0x36, 0xc9, 0xa0, 0x6c, 0x5d, 0xb5, 0xef, 0x47, 0xe1, 0x40, 0x42, 0xf9, 0x6e, 0x27, 0x57, 0x1b,
- 0xd1, 0xd7, 0xbb, 0x48, 0x4d, 0x0c, 0xfa, 0xbe, 0xfb, 0xe1, 0xa4, 0x56, 0x7a, 0xf9, 0x3b, 0x00,
- 0x00, 0xff, 0xff, 0x47, 0xf2, 0x5f, 0x42, 0xc8, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// MetricsServiceClient is the client API for MetricsService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MetricsServiceClient interface {
- Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error)
-}
-
-type metricsServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
- return &metricsServiceClient{cc}
-}
-
-func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) {
- out := new(ExportMetricsServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// MetricsServiceServer is the server API for MetricsService service.
-type MetricsServiceServer interface {
- Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error)
-}
-
-// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedMetricsServiceServer struct {
-}
-
-func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
- s.RegisterService(&_MetricsService_serviceDesc, srv)
-}
-
-func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportMetricsServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MetricsServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _MetricsService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
- HandlerType: (*MetricsServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _MetricsService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
-}
-
-func (m *ExportMetricsServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportMetricsServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportMetricsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetricsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportMetricsServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportMetricsServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportMetricsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetricsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportMetricsPartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportMetricsPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportMetricsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintMetricsService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedDataPoints != 0 {
- i = encodeVarintMetricsService(dAtA, i, uint64(m.RejectedDataPoints))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintMetricsService(dAtA []byte, offset int, v uint64) int {
- offset -= sovMetricsService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ExportMetricsServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for _, e := range m.ResourceMetrics {
- l = e.Size()
- n += 1 + l + sovMetricsService(uint64(l))
- }
- }
- return n
-}
-
-func (m *ExportMetricsServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovMetricsService(uint64(l))
- return n
-}
-
-func (m *ExportMetricsPartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedDataPoints != 0 {
- n += 1 + sovMetricsService(uint64(m.RejectedDataPoints))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovMetricsService(uint64(l))
- }
- return n
-}
-
-func sovMetricsService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozMetricsService(x uint64) (n int) {
- return sovMetricsService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportMetricsServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetricsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetricsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceMetrics = append(m.ResourceMetrics, &v1.ResourceMetrics{})
- if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetricsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetricsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportMetricsServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportMetricsServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportMetricsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetricsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetricsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetricsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetricsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportMetricsPartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportMetricsPartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportMetricsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
- }
- m.RejectedDataPoints = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedDataPoints |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetricsService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetricsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetricsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetricsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipMetricsService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthMetricsService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupMetricsService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMetricsService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthMetricsService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMetricsService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupMetricsService = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go
deleted file mode 100644
index 80eae38f5..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go
+++ /dev/null
@@ -1,897 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/profiles/v1development/profiles_service.proto
-
-package v1development
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1development "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportProfilesServiceRequest struct {
- // An array of ResourceProfiles.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceProfiles []*v1development.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"`
- // The reference table containing all data shared by profiles across the message being sent.
- Dictionary v1development.ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"`
-}
-
-func (m *ExportProfilesServiceRequest) Reset() { *m = ExportProfilesServiceRequest{} }
-func (m *ExportProfilesServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportProfilesServiceRequest) ProtoMessage() {}
-func (*ExportProfilesServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_ad3943ce836e7720, []int{0}
-}
-func (m *ExportProfilesServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportProfilesServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportProfilesServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportProfilesServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportProfilesServiceRequest.Merge(m, src)
-}
-func (m *ExportProfilesServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportProfilesServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportProfilesServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportProfilesServiceRequest proto.InternalMessageInfo
-
-func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1development.ResourceProfiles {
- if m != nil {
- return m.ResourceProfiles
- }
- return nil
-}
-
-func (m *ExportProfilesServiceRequest) GetDictionary() v1development.ProfilesDictionary {
- if m != nil {
- return m.Dictionary
- }
- return v1development.ProfilesDictionary{}
-}
-
-type ExportProfilesServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_` with the number of items it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_ = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportProfilesPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportProfilesServiceResponse) Reset() { *m = ExportProfilesServiceResponse{} }
-func (m *ExportProfilesServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportProfilesServiceResponse) ProtoMessage() {}
-func (*ExportProfilesServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_ad3943ce836e7720, []int{1}
-}
-func (m *ExportProfilesServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportProfilesServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportProfilesServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportProfilesServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportProfilesServiceResponse.Merge(m, src)
-}
-func (m *ExportProfilesServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportProfilesServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportProfilesServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportProfilesServiceResponse proto.InternalMessageInfo
-
-func (m *ExportProfilesServiceResponse) GetPartialSuccess() ExportProfilesPartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportProfilesPartialSuccess{}
-}
-
-type ExportProfilesPartialSuccess struct {
- // The number of rejected profiles.
- //
- // A `rejected_` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedProfiles int64 `protobuf:"varint,1,opt,name=rejected_profiles,json=rejectedProfiles,proto3" json:"rejected_profiles,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportProfilesPartialSuccess) Reset() { *m = ExportProfilesPartialSuccess{} }
-func (m *ExportProfilesPartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportProfilesPartialSuccess) ProtoMessage() {}
-func (*ExportProfilesPartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_ad3943ce836e7720, []int{2}
-}
-func (m *ExportProfilesPartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportProfilesPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportProfilesPartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportProfilesPartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportProfilesPartialSuccess.Merge(m, src)
-}
-func (m *ExportProfilesPartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportProfilesPartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportProfilesPartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportProfilesPartialSuccess proto.InternalMessageInfo
-
-func (m *ExportProfilesPartialSuccess) GetRejectedProfiles() int64 {
- if m != nil {
- return m.RejectedProfiles
- }
- return 0
-}
-
-func (m *ExportProfilesPartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceRequest")
- proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceResponse")
- proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesPartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", fileDescriptor_ad3943ce836e7720)
-}
-
-var fileDescriptor_ad3943ce836e7720 = []byte{
- // 467 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x8a, 0xd3, 0x40,
- 0x18, 0xc7, 0x33, 0xbb, 0xb2, 0xe0, 0xac, 0xba, 0x1a, 0xf6, 0xb0, 0x14, 0x8d, 0x4b, 0xbc, 0x14,
- 0x84, 0x09, 0x5b, 0x17, 0x44, 0x10, 0x0f, 0x75, 0x3d, 0x89, 0x18, 0x52, 0xf1, 0xa0, 0x87, 0x10,
- 0x27, 0x9f, 0x61, 0x24, 0x9d, 0x19, 0x67, 0xa6, 0xc5, 0x1e, 0x7d, 0x03, 0xdf, 0xc1, 0x9b, 0x57,
- 0x1f, 0xc2, 0x1e, 0x7b, 0xf4, 0x24, 0xd2, 0x3e, 0x80, 0x4f, 0x20, 0x48, 0x32, 0x4d, 0x6c, 0x42,
- 0xa5, 0x58, 0x7a, 0xcb, 0x7c, 0xc3, 0xff, 0xf7, 0xff, 0x7f, 0xdf, 0x17, 0x06, 0x3f, 0x15, 0x12,
- 0xb8, 0x81, 0x1c, 0x86, 0x60, 0xd4, 0x24, 0x90, 0x4a, 0x18, 0x11, 0x50, 0x91, 0xe7, 0x40, 0x8d,
- 0x50, 0xc5, 0xf9, 0x2d, 0xcb, 0x41, 0x07, 0xe3, 0xb3, 0x14, 0xc6, 0x90, 0x0b, 0x39, 0x04, 0x6e,
- 0xea, 0x72, 0xac, 0x41, 0x8d, 0x19, 0x05, 0x52, 0xea, 0xdc, 0xf3, 0x06, 0xcc, 0x16, 0x49, 0x0d,
- 0x23, 0x95, 0x8a, 0x34, 0x60, 0x9d, 0xe3, 0x4c, 0x64, 0xc2, 0x1a, 0x17, 0x5f, 0x56, 0xd6, 0x79,
- 0xb0, 0x2e, 0xd8, 0x86, 0x38, 0x56, 0xea, 0xff, 0x42, 0xf8, 0xe6, 0x93, 0x0f, 0x52, 0x28, 0x13,
- 0x2e, 0x2f, 0x06, 0x36, 0x66, 0x04, 0xef, 0x47, 0xa0, 0x8d, 0xcb, 0xf0, 0x0d, 0x05, 0x5a, 0x8c,
- 0x14, 0x85, 0xb8, 0xd2, 0x9e, 0xa0, 0xd3, 0xfd, 0xee, 0x61, 0xef, 0x21, 0x59, 0xd7, 0xc3, 0xfa,
- 0xe4, 0x24, 0x5a, 0x42, 0x2a, 0x9b, 0xe8, 0xba, 0x6a, 0x55, 0xdc, 0x14, 0xe3, 0x94, 0x51, 0xc3,
- 0x04, 0x4f, 0xd4, 0xe4, 0x64, 0xef, 0x14, 0x75, 0x0f, 0x7b, 0x8f, 0xfe, 0xc7, 0xa3, 0x22, 0x5d,
- 0xd4, 0x94, 0xfe, 0xa5, 0xe9, 0x8f, 0xdb, 0x4e, 0xb4, 0xc2, 0xf5, 0x3f, 0x23, 0x7c, 0xeb, 0x1f,
- 0x1d, 0x6b, 0x29, 0xb8, 0x06, 0xf7, 0x23, 0xc2, 0x47, 0x32, 0x51, 0x86, 0x25, 0x79, 0xac, 0x47,
- 0x94, 0x82, 0x2e, 0x3a, 0x2e, 0xd2, 0x44, 0x64, 0x9b, 0xad, 0x91, 0xa6, 0x5d, 0x68, 0xd1, 0x03,
- 0x4b, 0x5e, 0x26, 0xbc, 0x26, 0x1b, 0x55, 0x5f, 0xb6, 0xd7, 0xd2, 0x54, 0xb9, 0x77, 0x8b, 0xb5,
- 0xbc, 0x03, 0x6a, 0x20, 0x5d, 0x5d, 0x0b, 0xea, 0xee, 0x17, 0x83, 0xb5, 0x17, 0xf5, 0x60, 0xef,
- 0xe0, 0xab, 0xa0, 0x94, 0x50, 0xf1, 0x10, 0xb4, 0x4e, 0x32, 0x28, 0x67, 0x7b, 0x39, 0xba, 0x52,
- 0x16, 0x9f, 0xd9, 0x5a, 0xef, 0x1b, 0xc2, 0x47, 0xad, 0x89, 0xb8, 0x5f, 0x11, 0x3e, 0xb0, 0x31,
- 0xdc, 0x9d, 0xb4, 0xde, 0xfc, 0xb7, 0x3a, 0x83, 0x9d, 0x32, 0xed, 0xf6, 0x7c, 0xa7, 0xff, 0x1b,
- 0x4d, 0xe7, 0x1e, 0x9a, 0xcd, 0x3d, 0xf4, 0x73, 0xee, 0xa1, 0x4f, 0x0b, 0xcf, 0x99, 0x2d, 0x3c,
- 0xe7, 0xfb, 0xc2, 0x73, 0xf0, 0x7d, 0x26, 0xb6, 0xf2, 0xec, 0x1f, 0xb7, 0xec, 0xc2, 0x42, 0x16,
- 0xa2, 0x57, 0xaf, 0xb3, 0x36, 0x90, 0x35, 0xde, 0x84, 0x34, 0x31, 0x49, 0xc0, 0xb8, 0x01, 0xc5,
- 0x93, 0x3c, 0x28, 0x4f, 0xa5, 0x63, 0x06, 0x7c, 0xe3, 0xd3, 0xf1, 0x65, 0xef, 0xfc, 0xb9, 0x04,
- 0xfe, 0xa2, 0x46, 0x97, 0xa6, 0xe4, 0x71, 0x9d, 0xb5, 0xca, 0x44, 0x5e, 0x9e, 0x5d, 0xfc, 0x95,
- 0xbd, 0x39, 0x28, 0x1d, 0xee, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x40, 0xb9, 0xb5, 0x6e, 0xb0,
- 0x04, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// ProfilesServiceClient is the client API for ProfilesService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ProfilesServiceClient interface {
- Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error)
-}
-
-type profilesServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient {
- return &profilesServiceClient{cc}
-}
-
-func (c *profilesServiceClient) Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) {
- out := new(ExportProfilesServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// ProfilesServiceServer is the server API for ProfilesService service.
-type ProfilesServiceServer interface {
- Export(context.Context, *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error)
-}
-
-// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedProfilesServiceServer struct {
-}
-
-func (*UnimplementedProfilesServiceServer) Export(ctx context.Context, req *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) {
- s.RegisterService(&_ProfilesService_serviceDesc, srv)
-}
-
-func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportProfilesServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ProfilesServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ProfilesServiceServer).Export(ctx, req.(*ExportProfilesServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _ProfilesService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService",
- HandlerType: (*ProfilesServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _ProfilesService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto",
-}
-
-func (m *ExportProfilesServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportProfilesServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportProfilesServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfilesService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if len(m.ResourceProfiles) > 0 {
- for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfilesService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportProfilesServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportProfilesServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportProfilesServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfilesService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportProfilesPartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportProfilesPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportProfilesPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintProfilesService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedProfiles != 0 {
- i = encodeVarintProfilesService(dAtA, i, uint64(m.RejectedProfiles))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintProfilesService(dAtA []byte, offset int, v uint64) int {
- offset -= sovProfilesService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ExportProfilesServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceProfiles) > 0 {
- for _, e := range m.ResourceProfiles {
- l = e.Size()
- n += 1 + l + sovProfilesService(uint64(l))
- }
- }
- l = m.Dictionary.Size()
- n += 1 + l + sovProfilesService(uint64(l))
- return n
-}
-
-func (m *ExportProfilesServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovProfilesService(uint64(l))
- return n
-}
-
-func (m *ExportProfilesPartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedProfiles != 0 {
- n += 1 + sovProfilesService(uint64(m.RejectedProfiles))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovProfilesService(uint64(l))
- }
- return n
-}
-
-func sovProfilesService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozProfilesService(x uint64) (n int) {
- return sovProfilesService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportProfilesServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportProfilesServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportProfilesServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceProfiles = append(m.ResourceProfiles, &v1development.ResourceProfiles{})
- if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfilesService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfilesService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportProfilesServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportProfilesServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportProfilesServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfilesService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfilesService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportProfilesPartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportProfilesPartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportProfilesPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
- }
- m.RejectedProfiles = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedProfiles |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfilesService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfilesService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipProfilesService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthProfilesService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupProfilesService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthProfilesService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthProfilesService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowProfilesService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupProfilesService = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go
deleted file mode 100644
index 5b547b8a7..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go
+++ /dev/null
@@ -1,839 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportTraceServiceRequest struct {
- // An array of ResourceSpans.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
-}
-
-func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
-func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceRequest) ProtoMessage() {}
-func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_192a962890318cf4, []int{0}
-}
-func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
-}
-func (m *ExportTraceServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
-
-func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
- if m != nil {
- return m.ResourceSpans
- }
- return nil
-}
-
-type ExportTraceServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_` with the number of items it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_ = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
-func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceResponse) ProtoMessage() {}
-func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_192a962890318cf4, []int{1}
-}
-func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
-}
-func (m *ExportTraceServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
-
-func (m *ExportTraceServiceResponse) GetPartialSuccess() ExportTracePartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportTracePartialSuccess{}
-}
-
-type ExportTracePartialSuccess struct {
- // The number of rejected spans.
- //
- // A `rejected_` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportTracePartialSuccess) Reset() { *m = ExportTracePartialSuccess{} }
-func (m *ExportTracePartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportTracePartialSuccess) ProtoMessage() {}
-func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_192a962890318cf4, []int{2}
-}
-func (m *ExportTracePartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportTracePartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportTracePartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportTracePartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTracePartialSuccess.Merge(m, src)
-}
-func (m *ExportTracePartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportTracePartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTracePartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTracePartialSuccess proto.InternalMessageInfo
-
-func (m *ExportTracePartialSuccess) GetRejectedSpans() int64 {
- if m != nil {
- return m.RejectedSpans
- }
- return 0
-}
-
-func (m *ExportTracePartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest")
- proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse")
- proto.RegisterType((*ExportTracePartialSuccess)(nil), "opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4)
-}
-
-var fileDescriptor_192a962890318cf4 = []byte{
- // 413 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4f, 0xeb, 0xd3, 0x30,
- 0x18, 0x6e, 0x36, 0x19, 0x98, 0xfd, 0x11, 0x8b, 0x87, 0xd9, 0x43, 0x1d, 0x15, 0x47, 0x45, 0x48,
- 0xd9, 0xbc, 0x79, 0xb3, 0xe2, 0x71, 0x38, 0xba, 0xe1, 0xc1, 0xcb, 0x88, 0xdd, 0x4b, 0xa9, 0x74,
- 0x4d, 0x4c, 0xb2, 0xa1, 0x5f, 0x42, 0xf4, 0x2b, 0x78, 0xf4, 0x93, 0xec, 0xb8, 0xa3, 0x27, 0x91,
- 0xed, 0x8b, 0x48, 0x12, 0x2d, 0xad, 0xf4, 0x30, 0x7e, 0xbf, 0x5b, 0xf2, 0xf0, 0x3e, 0x7f, 0xde,
- 0x27, 0x04, 0xbf, 0x60, 0x1c, 0x4a, 0x05, 0x05, 0xec, 0x40, 0x89, 0xcf, 0x11, 0x17, 0x4c, 0xb1,
- 0x28, 0x65, 0x45, 0x01, 0xa9, 0x62, 0x22, 0x52, 0x82, 0xa6, 0x10, 0x1d, 0x66, 0xf6, 0xb0, 0x91,
- 0x20, 0x0e, 0x79, 0x0a, 0xc4, 0x8c, 0xb9, 0xd3, 0x06, 0xd7, 0x82, 0xa4, 0xe2, 0x12, 0x43, 0x21,
- 0x87, 0x99, 0xf7, 0x20, 0x63, 0x19, 0xb3, 0xca, 0xfa, 0x64, 0x07, 0xbd, 0xb0, 0xcd, 0xb9, 0xe9,
- 0x67, 0x27, 0x03, 0x86, 0x1f, 0xbe, 0xfe, 0xc4, 0x99, 0x50, 0x6b, 0x0d, 0xae, 0x6c, 0x86, 0x04,
- 0x3e, 0xee, 0x41, 0x2a, 0x37, 0xc1, 0x23, 0x01, 0x92, 0xed, 0x85, 0x8e, 0xc7, 0x69, 0x29, 0xc7,
- 0x68, 0xd2, 0x0d, 0xfb, 0xf3, 0x67, 0xa4, 0x2d, 0xdd, 0xbf, 0x4c, 0x24, 0xf9, 0xcb, 0x59, 0x69,
- 0x4a, 0x32, 0x14, 0xf5, 0x6b, 0xf0, 0x05, 0x61, 0xaf, 0xcd, 0x51, 0x72, 0x56, 0x4a, 0x70, 0x39,
- 0xbe, 0xc7, 0xa9, 0x50, 0x39, 0x2d, 0x36, 0x72, 0x9f, 0xa6, 0x20, 0xb5, 0x27, 0x0a, 0xfb, 0xf3,
- 0x97, 0xe4, 0xba, 0x46, 0x48, 0x4d, 0x7c, 0x69, 0x95, 0x56, 0x56, 0x28, 0xbe, 0x73, 0xfc, 0xf5,
- 0xc8, 0x49, 0x46, 0xbc, 0x81, 0x06, 0x59, 0xa3, 0x81, 0x26, 0xc5, 0x7d, 0xa2, 0x1b, 0xf8, 0x00,
- 0xa9, 0x82, 0x6d, 0xd5, 0x00, 0x0a, 0xbb, 0x7a, 0x29, 0x8b, 0x9a, 0xa5, 0xdc, 0xc7, 0x78, 0x08,
- 0x42, 0x30, 0xb1, 0xd9, 0x81, 0x94, 0x34, 0x83, 0x71, 0x67, 0x82, 0xc2, 0xbb, 0xc9, 0xc0, 0x80,
- 0x0b, 0x8b, 0xcd, 0xbf, 0x23, 0x3c, 0xa8, 0xef, 0xec, 0x7e, 0x43, 0xb8, 0x67, 0xad, 0xdd, 0x9b,
- 0x6c, 0xd7, 0x7c, 0x2c, 0x2f, 0xbe, 0x8d, 0x84, 0x6d, 0x3f, 0x70, 0xe2, 0x13, 0x3a, 0x9e, 0x7d,
- 0x74, 0x3a, 0xfb, 0xe8, 0xf7, 0xd9, 0x47, 0x5f, 0x2f, 0xbe, 0x73, 0xba, 0xf8, 0xce, 0xcf, 0x8b,
- 0xef, 0xe0, 0xa7, 0x39, 0xbb, 0xd2, 0x22, 0xbe, 0x5f, 0x57, 0x5f, 0xea, 0xa9, 0x25, 0x7a, 0xb7,
- 0xc8, 0xfe, 0xe7, 0xe7, 0xf5, 0xef, 0xc0, 0xb7, 0x54, 0xd1, 0x28, 0x2f, 0x15, 0x88, 0x92, 0x16,
- 0x91, 0xb9, 0x19, 0x83, 0x0c, 0xca, 0x96, 0x5f, 0xf3, 0xa3, 0x33, 0x7d, 0xc3, 0xa1, 0x5c, 0x57,
- 0x62, 0xc6, 0x86, 0xbc, 0xaa, 0xc2, 0x98, 0x08, 0xe4, 0xed, 0xec, 0x7d, 0xcf, 0xa8, 0x3c, 0xff,
- 0x13, 0x00, 0x00, 0xff, 0xff, 0x82, 0xce, 0x78, 0xc7, 0x8f, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// TraceServiceClient is the client API for TraceService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type TraceServiceClient interface {
- Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
-}
-
-type traceServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
- return &traceServiceClient{cc}
-}
-
-func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) {
- out := new(ExportTraceServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// TraceServiceServer is the server API for TraceService service.
-type TraceServiceServer interface {
- Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
-}
-
-// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedTraceServiceServer struct {
-}
-
-func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
- s.RegisterService(&_TraceService_serviceDesc, srv)
-}
-
-func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportTraceServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TraceServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _TraceService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
- HandlerType: (*TraceServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _TraceService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
-}
-
-func (m *ExportTraceServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportTraceServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportTraceServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTraceService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportTraceServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportTraceServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportTraceServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTraceService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportTracePartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportTracePartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportTracePartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintTraceService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedSpans != 0 {
- i = encodeVarintTraceService(dAtA, i, uint64(m.RejectedSpans))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintTraceService(dAtA []byte, offset int, v uint64) int {
- offset -= sovTraceService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ExportTraceServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for _, e := range m.ResourceSpans {
- l = e.Size()
- n += 1 + l + sovTraceService(uint64(l))
- }
- }
- return n
-}
-
-func (m *ExportTraceServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovTraceService(uint64(l))
- return n
-}
-
-func (m *ExportTracePartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedSpans != 0 {
- n += 1 + sovTraceService(uint64(m.RejectedSpans))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovTraceService(uint64(l))
- }
- return n
-}
-
-func sovTraceService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozTraceService(x uint64) (n int) {
- return sovTraceService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportTraceServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportTraceServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportTraceServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTraceService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTraceService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceSpans = append(m.ResourceSpans, &v1.ResourceSpans{})
- if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTraceService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTraceService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportTraceServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportTraceServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportTraceServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTraceService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTraceService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTraceService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTraceService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportTracePartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportTracePartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportTracePartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
- }
- m.RejectedSpans = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedSpans |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTraceService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTraceService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTraceService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTraceService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipTraceService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthTraceService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupTraceService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTraceService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthTraceService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTraceService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupTraceService = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go
deleted file mode 100644
index 179aa9d5d..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go
+++ /dev/null
@@ -1,2080 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/common/v1/common.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// AnyValue is used to represent any type of attribute value. AnyValue may contain a
-// primitive value such as a string or integer or it may contain an arbitrary nested
-// object containing arrays, key-value lists and primitives.
-type AnyValue struct {
- // The value is one of the listed fields. It is valid for all values to be unspecified
- // in which case this AnyValue is considered to be "empty".
- //
- // Types that are valid to be assigned to Value:
- // *AnyValue_StringValue
- // *AnyValue_BoolValue
- // *AnyValue_IntValue
- // *AnyValue_DoubleValue
- // *AnyValue_ArrayValue
- // *AnyValue_KvlistValue
- // *AnyValue_BytesValue
- Value isAnyValue_Value `protobuf_oneof:"value"`
-}
-
-func (m *AnyValue) Reset() { *m = AnyValue{} }
-func (m *AnyValue) String() string { return proto.CompactTextString(m) }
-func (*AnyValue) ProtoMessage() {}
-func (*AnyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{0}
-}
-func (m *AnyValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AnyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AnyValue.Merge(m, src)
-}
-func (m *AnyValue) XXX_Size() int {
- return m.Size()
-}
-func (m *AnyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_AnyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AnyValue proto.InternalMessageInfo
-
-type isAnyValue_Value interface {
- isAnyValue_Value()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type AnyValue_StringValue struct {
- StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"`
-}
-type AnyValue_BoolValue struct {
- BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"`
-}
-type AnyValue_IntValue struct {
- IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"`
-}
-type AnyValue_DoubleValue struct {
- DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"`
-}
-type AnyValue_ArrayValue struct {
- ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"`
-}
-type AnyValue_KvlistValue struct {
- KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"`
-}
-type AnyValue_BytesValue struct {
- BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof" json:"bytes_value,omitempty"`
-}
-
-func (*AnyValue_StringValue) isAnyValue_Value() {}
-func (*AnyValue_BoolValue) isAnyValue_Value() {}
-func (*AnyValue_IntValue) isAnyValue_Value() {}
-func (*AnyValue_DoubleValue) isAnyValue_Value() {}
-func (*AnyValue_ArrayValue) isAnyValue_Value() {}
-func (*AnyValue_KvlistValue) isAnyValue_Value() {}
-func (*AnyValue_BytesValue) isAnyValue_Value() {}
-
-func (m *AnyValue) GetValue() isAnyValue_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *AnyValue) GetStringValue() string {
- if x, ok := m.GetValue().(*AnyValue_StringValue); ok {
- return x.StringValue
- }
- return ""
-}
-
-func (m *AnyValue) GetBoolValue() bool {
- if x, ok := m.GetValue().(*AnyValue_BoolValue); ok {
- return x.BoolValue
- }
- return false
-}
-
-func (m *AnyValue) GetIntValue() int64 {
- if x, ok := m.GetValue().(*AnyValue_IntValue); ok {
- return x.IntValue
- }
- return 0
-}
-
-func (m *AnyValue) GetDoubleValue() float64 {
- if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
- return x.DoubleValue
- }
- return 0
-}
-
-func (m *AnyValue) GetArrayValue() *ArrayValue {
- if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
- return x.ArrayValue
- }
- return nil
-}
-
-func (m *AnyValue) GetKvlistValue() *KeyValueList {
- if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
- return x.KvlistValue
- }
- return nil
-}
-
-func (m *AnyValue) GetBytesValue() []byte {
- if x, ok := m.GetValue().(*AnyValue_BytesValue); ok {
- return x.BytesValue
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*AnyValue) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*AnyValue_StringValue)(nil),
- (*AnyValue_BoolValue)(nil),
- (*AnyValue_IntValue)(nil),
- (*AnyValue_DoubleValue)(nil),
- (*AnyValue_ArrayValue)(nil),
- (*AnyValue_KvlistValue)(nil),
- (*AnyValue_BytesValue)(nil),
- }
-}
-
-// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
-// since oneof in AnyValue does not allow repeated fields.
-type ArrayValue struct {
- // Array of values. The array may be empty (contain 0 elements).
- Values []AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
-}
-
-func (m *ArrayValue) Reset() { *m = ArrayValue{} }
-func (m *ArrayValue) String() string { return proto.CompactTextString(m) }
-func (*ArrayValue) ProtoMessage() {}
-func (*ArrayValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{1}
-}
-func (m *ArrayValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ArrayValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ArrayValue.Merge(m, src)
-}
-func (m *ArrayValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ArrayValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ArrayValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ArrayValue proto.InternalMessageInfo
-
-func (m *ArrayValue) GetValues() []AnyValue {
- if m != nil {
- return m.Values
- }
- return nil
-}
-
-// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
-// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
-// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
-// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
-// are semantically equivalent.
-type KeyValueList struct {
- // A collection of key/value pairs of key-value pairs. The list may be empty (may
- // contain 0 elements).
- // The keys MUST be unique (it is not allowed to have more than one
- // value with the same key).
- Values []KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
-}
-
-func (m *KeyValueList) Reset() { *m = KeyValueList{} }
-func (m *KeyValueList) String() string { return proto.CompactTextString(m) }
-func (*KeyValueList) ProtoMessage() {}
-func (*KeyValueList) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{2}
-}
-func (m *KeyValueList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KeyValueList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KeyValueList.Merge(m, src)
-}
-func (m *KeyValueList) XXX_Size() int {
- return m.Size()
-}
-func (m *KeyValueList) XXX_DiscardUnknown() {
- xxx_messageInfo_KeyValueList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValueList proto.InternalMessageInfo
-
-func (m *KeyValueList) GetValues() []KeyValue {
- if m != nil {
- return m.Values
- }
- return nil
-}
-
-// KeyValue is a key-value pair that is used to store Span attributes, Link
-// attributes, etc.
-type KeyValue struct {
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"`
-}
-
-func (m *KeyValue) Reset() { *m = KeyValue{} }
-func (m *KeyValue) String() string { return proto.CompactTextString(m) }
-func (*KeyValue) ProtoMessage() {}
-func (*KeyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{3}
-}
-func (m *KeyValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KeyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KeyValue.Merge(m, src)
-}
-func (m *KeyValue) XXX_Size() int {
- return m.Size()
-}
-func (m *KeyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_KeyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValue proto.InternalMessageInfo
-
-func (m *KeyValue) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *KeyValue) GetValue() AnyValue {
- if m != nil {
- return m.Value
- }
- return AnyValue{}
-}
-
-// InstrumentationScope is a message representing the instrumentation scope information
-// such as the fully qualified name and version.
-type InstrumentationScope struct {
- // An empty instrumentation scope name means the name is unknown.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
- // Additional attributes that describe the scope. [Optional].
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"`
- DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
-}
-
-func (m *InstrumentationScope) Reset() { *m = InstrumentationScope{} }
-func (m *InstrumentationScope) String() string { return proto.CompactTextString(m) }
-func (*InstrumentationScope) ProtoMessage() {}
-func (*InstrumentationScope) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{4}
-}
-func (m *InstrumentationScope) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *InstrumentationScope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_InstrumentationScope.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *InstrumentationScope) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InstrumentationScope.Merge(m, src)
-}
-func (m *InstrumentationScope) XXX_Size() int {
- return m.Size()
-}
-func (m *InstrumentationScope) XXX_DiscardUnknown() {
- xxx_messageInfo_InstrumentationScope.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InstrumentationScope proto.InternalMessageInfo
-
-func (m *InstrumentationScope) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *InstrumentationScope) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-func (m *InstrumentationScope) GetAttributes() []KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *InstrumentationScope) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-// A reference to an Entity.
-// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs.
-//
-// Status: [Development]
-type EntityRef struct {
- // The Schema URL, if known. This is the identifier of the Schema that the entity data
- // is recorded in. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- //
- // This schema_url applies to the data in this message and to the Resource attributes
- // referenced by id_keys and description_keys.
- // TODO: discuss if we are happy with this somewhat complicated definition of what
- // the schema_url applies to.
- //
- // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs.
- SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
- // Defines the type of the entity. MUST not change during the lifetime of the entity.
- // For example: "service" or "host". This field is required and MUST not be empty
- // for valid entities.
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- // Attribute Keys that identify the entity.
- // MUST not change during the lifetime of the entity. The Id must contain at least one attribute.
- // These keys MUST exist in the containing {message}.attributes.
- IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"`
- // Descriptive (non-identifying) attribute keys of the entity.
- // MAY change over the lifetime of the entity. MAY be empty.
- // These attribute keys are not part of entity's identity.
- // These keys MUST exist in the containing {message}.attributes.
- DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"`
-}
-
-func (m *EntityRef) Reset() { *m = EntityRef{} }
-func (m *EntityRef) String() string { return proto.CompactTextString(m) }
-func (*EntityRef) ProtoMessage() {}
-func (*EntityRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{5}
-}
-func (m *EntityRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EntityRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_EntityRef.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *EntityRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EntityRef.Merge(m, src)
-}
-func (m *EntityRef) XXX_Size() int {
- return m.Size()
-}
-func (m *EntityRef) XXX_DiscardUnknown() {
- xxx_messageInfo_EntityRef.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityRef proto.InternalMessageInfo
-
-func (m *EntityRef) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-func (m *EntityRef) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *EntityRef) GetIdKeys() []string {
- if m != nil {
- return m.IdKeys
- }
- return nil
-}
-
-func (m *EntityRef) GetDescriptionKeys() []string {
- if m != nil {
- return m.DescriptionKeys
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue")
- proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue")
- proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList")
- proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue")
- proto.RegisterType((*InstrumentationScope)(nil), "opentelemetry.proto.common.v1.InstrumentationScope")
- proto.RegisterType((*EntityRef)(nil), "opentelemetry.proto.common.v1.EntityRef")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817)
-}
-
-var fileDescriptor_62ba46dcb97aa817 = []byte{
- // 608 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41,
- 0x14, 0xdf, 0xa1, 0xa5, 0xed, 0xbe, 0xd6, 0x48, 0x26, 0x44, 0x1b, 0x93, 0x96, 0xb5, 0x1e, 0x5c,
- 0x34, 0x69, 0x03, 0x5e, 0xbc, 0x52, 0x24, 0xa9, 0x01, 0x23, 0x59, 0x84, 0x83, 0x97, 0x66, 0xdb,
- 0x7d, 0xd6, 0x09, 0xdb, 0x99, 0xcd, 0xec, 0xb4, 0xc9, 0x5e, 0xfd, 0x04, 0x7e, 0x0e, 0x2f, 0x7e,
- 0x0d, 0x2e, 0x26, 0x1c, 0x3d, 0x19, 0x02, 0x5f, 0xc4, 0xcc, 0x9f, 0x16, 0xe4, 0x00, 0xc1, 0xdb,
- 0x7b, 0xbf, 0xf7, 0x7b, 0xbf, 0xf7, 0x7e, 0x33, 0x93, 0x81, 0x57, 0x22, 0x43, 0xae, 0x30, 0xc5,
- 0x29, 0x2a, 0x59, 0xf4, 0x32, 0x29, 0x94, 0xe8, 0x8d, 0xc5, 0x74, 0x2a, 0x78, 0x6f, 0xbe, 0xe5,
- 0xa2, 0xae, 0x81, 0x69, 0xeb, 0x1f, 0xae, 0x05, 0xbb, 0x8e, 0x31, 0xdf, 0x7a, 0xb6, 0x3e, 0x11,
- 0x13, 0x61, 0x05, 0x74, 0x64, 0xeb, 0x9d, 0x8b, 0x15, 0xa8, 0xed, 0xf0, 0xe2, 0x24, 0x4e, 0x67,
- 0x48, 0x5f, 0x40, 0x23, 0x57, 0x92, 0xf1, 0xc9, 0x70, 0xae, 0xf3, 0x26, 0x09, 0x48, 0xe8, 0x0f,
- 0xbc, 0xa8, 0x6e, 0x51, 0x4b, 0xda, 0x00, 0x18, 0x09, 0x91, 0x3a, 0xca, 0x4a, 0x40, 0xc2, 0xda,
- 0xc0, 0x8b, 0x7c, 0x8d, 0x59, 0x42, 0x0b, 0x7c, 0xc6, 0x95, 0xab, 0x97, 0x02, 0x12, 0x96, 0x06,
- 0x5e, 0x54, 0x63, 0x5c, 0x2d, 0x87, 0x24, 0x62, 0x36, 0x4a, 0xd1, 0x31, 0xca, 0x01, 0x09, 0x89,
- 0x1e, 0x62, 0x51, 0x4b, 0x3a, 0x80, 0x7a, 0x2c, 0x65, 0x5c, 0x38, 0xce, 0x6a, 0x40, 0xc2, 0xfa,
- 0xf6, 0x66, 0xf7, 0x4e, 0x87, 0xdd, 0x1d, 0xdd, 0x61, 0xfa, 0x07, 0x5e, 0x04, 0xf1, 0x32, 0xa3,
- 0x87, 0xd0, 0x38, 0x9d, 0xa7, 0x2c, 0x5f, 0x2c, 0x55, 0x31, 0x72, 0xaf, 0xef, 0x91, 0xdb, 0x47,
- 0xdb, 0x7e, 0xc0, 0x72, 0xa5, 0xf7, 0xb3, 0x12, 0x56, 0xf1, 0x39, 0xd4, 0x47, 0x85, 0xc2, 0xdc,
- 0x09, 0x56, 0x03, 0x12, 0x36, 0xf4, 0x50, 0x03, 0x1a, 0x4a, 0xbf, 0x0a, 0xab, 0xa6, 0xd8, 0x39,
- 0x02, 0xb8, 0xde, 0x8c, 0xee, 0x41, 0xc5, 0xc0, 0x79, 0x93, 0x04, 0xa5, 0xb0, 0xbe, 0xfd, 0xf2,
- 0x3e, 0x53, 0xee, 0x72, 0xfa, 0xe5, 0xb3, 0x3f, 0x1b, 0x5e, 0xe4, 0x9a, 0x3b, 0xc7, 0xd0, 0xb8,
- 0xb9, 0xdf, 0x83, 0x65, 0x17, 0xcd, 0xb7, 0x64, 0x63, 0xa8, 0x2d, 0x2a, 0x74, 0x0d, 0x4a, 0xa7,
- 0x58, 0xd8, 0x47, 0x10, 0xe9, 0x90, 0xee, 0x3a, 0x4b, 0xe6, 0xd6, 0x1f, 0xbc, 0xba, 0x3b, 0x8e,
- 0x5f, 0x04, 0xd6, 0xdf, 0xf3, 0x5c, 0xc9, 0xd9, 0x14, 0xb9, 0x8a, 0x15, 0x13, 0xfc, 0x68, 0x2c,
- 0x32, 0xa4, 0x14, 0xca, 0x3c, 0x9e, 0xba, 0x57, 0x17, 0x99, 0x98, 0x36, 0xa1, 0x3a, 0x47, 0x99,
- 0x33, 0xc1, 0xcd, 0x4c, 0x3f, 0x5a, 0xa4, 0xf4, 0x03, 0x40, 0xac, 0x94, 0x64, 0xa3, 0x99, 0xc2,
- 0xbc, 0x59, 0xfa, 0x1f, 0xd3, 0x37, 0x04, 0xe8, 0x5b, 0x68, 0x26, 0x52, 0x64, 0x19, 0x26, 0xc3,
- 0x6b, 0x74, 0x38, 0x16, 0x33, 0xae, 0xcc, 0x0b, 0x7d, 0x14, 0x3d, 0x71, 0xf5, 0x9d, 0x65, 0x79,
- 0x57, 0x57, 0x3b, 0xdf, 0x08, 0xf8, 0x7b, 0x5c, 0x31, 0x55, 0x44, 0xf8, 0x85, 0xb6, 0x00, 0xf2,
- 0xf1, 0x57, 0x9c, 0xc6, 0xc3, 0x99, 0x4c, 0x9d, 0x15, 0xdf, 0x22, 0xc7, 0x32, 0xd5, 0x1e, 0x55,
- 0x91, 0xa1, 0x33, 0x63, 0x62, 0xfa, 0x14, 0xaa, 0x2c, 0x19, 0x9e, 0x62, 0x61, 0x6d, 0xf8, 0x51,
- 0x85, 0x25, 0xfb, 0x58, 0xe4, 0x74, 0x13, 0xd6, 0x12, 0xcc, 0xc7, 0x92, 0x65, 0xfa, 0x90, 0x2c,
- 0xa3, 0x6c, 0x18, 0x8f, 0x6f, 0xe0, 0x9a, 0xda, 0xff, 0x49, 0xce, 0x2e, 0xdb, 0xe4, 0xfc, 0xb2,
- 0x4d, 0x2e, 0x2e, 0xdb, 0xe4, 0xfb, 0x55, 0xdb, 0x3b, 0xbf, 0x6a, 0x7b, 0xbf, 0xaf, 0xda, 0x1e,
- 0x04, 0x4c, 0xdc, 0x7d, 0x2c, 0xfd, 0xfa, 0xae, 0x09, 0x0f, 0x35, 0x7c, 0x48, 0x3e, 0xbf, 0x9b,
- 0xdc, 0x6e, 0x60, 0xfa, 0xcf, 0x49, 0x53, 0x1c, 0x2b, 0x21, 0x7b, 0x59, 0x12, 0xab, 0xb8, 0xc7,
- 0xb8, 0x42, 0xc9, 0xe3, 0xb4, 0x67, 0x32, 0xa3, 0x38, 0x41, 0x7e, 0xfd, 0x35, 0xfd, 0x58, 0x69,
- 0x7d, 0xcc, 0x90, 0x7f, 0x5a, 0x6a, 0x18, 0xf5, 0xae, 0x9d, 0xd4, 0x3d, 0xd9, 0x1a, 0x55, 0x4c,
- 0xcf, 0x9b, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x8b, 0xd4, 0x3b, 0xe2, 0x04, 0x00, 0x00,
-}
-
-func (m *AnyValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AnyValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Value != nil {
- {
- size := m.Value.Size()
- i -= size
- if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AnyValue_StringValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= len(m.StringValue)
- copy(dAtA[i:], m.StringValue)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.StringValue)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_BoolValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i--
- if m.BoolValue {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_IntValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintCommon(dAtA, i, uint64(m.IntValue))
- i--
- dAtA[i] = 0x18
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue))))
- i--
- dAtA[i] = 0x21
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_ArrayValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ArrayValue != nil {
- {
- size, err := m.ArrayValue.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_KvlistValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_KvlistValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.KvlistValue != nil {
- {
- size, err := m.KvlistValue.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_BytesValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.BytesValue != nil {
- i -= len(m.BytesValue)
- copy(dAtA[i:], m.BytesValue)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.BytesValue)))
- i--
- dAtA[i] = 0x3a
- }
- return len(dAtA) - i, nil
-}
-func (m *ArrayValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ArrayValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Values) > 0 {
- for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *KeyValueList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KeyValueList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValueList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Values) > 0 {
- for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *KeyValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *InstrumentationScope) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *InstrumentationScope) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InstrumentationScope) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintCommon(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x20
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *EntityRef) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EntityRef) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EntityRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DescriptionKeys) > 0 {
- for iNdEx := len(m.DescriptionKeys) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.DescriptionKeys[iNdEx])
- copy(dAtA[i:], m.DescriptionKeys[iNdEx])
- i = encodeVarintCommon(dAtA, i, uint64(len(m.DescriptionKeys[iNdEx])))
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.IdKeys) > 0 {
- for iNdEx := len(m.IdKeys) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.IdKeys[iNdEx])
- copy(dAtA[i:], m.IdKeys[iNdEx])
- i = encodeVarintCommon(dAtA, i, uint64(len(m.IdKeys[iNdEx])))
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Type) > 0 {
- i -= len(m.Type)
- copy(dAtA[i:], m.Type)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Type)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintCommon(dAtA []byte, offset int, v uint64) int {
- offset -= sovCommon(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *AnyValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Value != nil {
- n += m.Value.Size()
- }
- return n
-}
-
-func (m *AnyValue_StringValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.StringValue)
- n += 1 + l + sovCommon(uint64(l))
- return n
-}
-func (m *AnyValue_BoolValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 2
- return n
-}
-func (m *AnyValue_IntValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovCommon(uint64(m.IntValue))
- return n
-}
-func (m *AnyValue_DoubleValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *AnyValue_ArrayValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ArrayValue != nil {
- l = m.ArrayValue.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- return n
-}
-func (m *AnyValue_KvlistValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.KvlistValue != nil {
- l = m.KvlistValue.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- return n
-}
-func (m *AnyValue_BytesValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.BytesValue != nil {
- l = len(m.BytesValue)
- n += 1 + l + sovCommon(uint64(l))
- }
- return n
-}
-func (m *ArrayValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Values) > 0 {
- for _, e := range m.Values {
- l = e.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- return n
-}
-
-func (m *KeyValueList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Values) > 0 {
- for _, e := range m.Values {
- l = e.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- return n
-}
-
-func (m *KeyValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- l = m.Value.Size()
- n += 1 + l + sovCommon(uint64(l))
- return n
-}
-
-func (m *InstrumentationScope) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovCommon(uint64(m.DroppedAttributesCount))
- }
- return n
-}
-
-func (m *EntityRef) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- l = len(m.Type)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- if len(m.IdKeys) > 0 {
- for _, s := range m.IdKeys {
- l = len(s)
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- if len(m.DescriptionKeys) > 0 {
- for _, s := range m.DescriptionKeys {
- l = len(s)
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- return n
-}
-
-func sovCommon(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozCommon(x uint64) (n int) {
- return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *AnyValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AnyValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = &AnyValue_StringValue{string(dAtA[iNdEx:postIndex])}
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.Value = &AnyValue_BoolValue{b}
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Value = &AnyValue_IntValue{v}
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &AnyValue_DoubleValue{float64(math.Float64frombits(v))}
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &ArrayValue{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Value = &AnyValue_ArrayValue{v}
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &KeyValueList{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Value = &AnyValue_KvlistValue{v}
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := make([]byte, postIndex-iNdEx)
- copy(v, dAtA[iNdEx:postIndex])
- m.Value = &AnyValue_BytesValue{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ArrayValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Values = append(m.Values, AnyValue{})
- if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *KeyValueList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Values = append(m.Values, KeyValue{})
- if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *KeyValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *InstrumentationScope) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: InstrumentationScope: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: InstrumentationScope: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EntityRef) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EntityRef: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EntityRef: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Type = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IdKeys = append(m.IdKeys, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DescriptionKeys = append(m.DescriptionKeys, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipCommon(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthCommon
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupCommon
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthCommon
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go
deleted file mode 100644
index da266d167..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go
+++ /dev/null
@@ -1,1834 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/logs/v1/logs.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// Possible values for LogRecord.SeverityNumber.
-type SeverityNumber int32
-
-const (
- // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.
- SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0
- SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1
- SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2
- SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3
- SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4
- SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5
- SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6
- SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7
- SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8
- SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9
- SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10
- SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11
- SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12
- SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13
- SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14
- SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15
- SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16
- SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17
- SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18
- SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19
- SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20
- SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21
- SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22
- SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23
- SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24
-)
-
-var SeverityNumber_name = map[int32]string{
- 0: "SEVERITY_NUMBER_UNSPECIFIED",
- 1: "SEVERITY_NUMBER_TRACE",
- 2: "SEVERITY_NUMBER_TRACE2",
- 3: "SEVERITY_NUMBER_TRACE3",
- 4: "SEVERITY_NUMBER_TRACE4",
- 5: "SEVERITY_NUMBER_DEBUG",
- 6: "SEVERITY_NUMBER_DEBUG2",
- 7: "SEVERITY_NUMBER_DEBUG3",
- 8: "SEVERITY_NUMBER_DEBUG4",
- 9: "SEVERITY_NUMBER_INFO",
- 10: "SEVERITY_NUMBER_INFO2",
- 11: "SEVERITY_NUMBER_INFO3",
- 12: "SEVERITY_NUMBER_INFO4",
- 13: "SEVERITY_NUMBER_WARN",
- 14: "SEVERITY_NUMBER_WARN2",
- 15: "SEVERITY_NUMBER_WARN3",
- 16: "SEVERITY_NUMBER_WARN4",
- 17: "SEVERITY_NUMBER_ERROR",
- 18: "SEVERITY_NUMBER_ERROR2",
- 19: "SEVERITY_NUMBER_ERROR3",
- 20: "SEVERITY_NUMBER_ERROR4",
- 21: "SEVERITY_NUMBER_FATAL",
- 22: "SEVERITY_NUMBER_FATAL2",
- 23: "SEVERITY_NUMBER_FATAL3",
- 24: "SEVERITY_NUMBER_FATAL4",
-}
-
-var SeverityNumber_value = map[string]int32{
- "SEVERITY_NUMBER_UNSPECIFIED": 0,
- "SEVERITY_NUMBER_TRACE": 1,
- "SEVERITY_NUMBER_TRACE2": 2,
- "SEVERITY_NUMBER_TRACE3": 3,
- "SEVERITY_NUMBER_TRACE4": 4,
- "SEVERITY_NUMBER_DEBUG": 5,
- "SEVERITY_NUMBER_DEBUG2": 6,
- "SEVERITY_NUMBER_DEBUG3": 7,
- "SEVERITY_NUMBER_DEBUG4": 8,
- "SEVERITY_NUMBER_INFO": 9,
- "SEVERITY_NUMBER_INFO2": 10,
- "SEVERITY_NUMBER_INFO3": 11,
- "SEVERITY_NUMBER_INFO4": 12,
- "SEVERITY_NUMBER_WARN": 13,
- "SEVERITY_NUMBER_WARN2": 14,
- "SEVERITY_NUMBER_WARN3": 15,
- "SEVERITY_NUMBER_WARN4": 16,
- "SEVERITY_NUMBER_ERROR": 17,
- "SEVERITY_NUMBER_ERROR2": 18,
- "SEVERITY_NUMBER_ERROR3": 19,
- "SEVERITY_NUMBER_ERROR4": 20,
- "SEVERITY_NUMBER_FATAL": 21,
- "SEVERITY_NUMBER_FATAL2": 22,
- "SEVERITY_NUMBER_FATAL3": 23,
- "SEVERITY_NUMBER_FATAL4": 24,
-}
-
-func (x SeverityNumber) String() string {
- return proto.EnumName(SeverityNumber_name, int32(x))
-}
-
-func (SeverityNumber) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{0}
-}
-
-// LogRecordFlags represents constants used to interpret the
-// LogRecord.flags field, which is protobuf 'fixed32' type and is to
-// be used as bit-fields. Each non-zero value defined in this enum is
-// a bit-mask. To extract the bit-field, for example, use an
-// expression like:
-//
-// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
-type LogRecordFlags int32
-
-const (
- // The zero value for the enum. Should not be used for comparisons.
- // Instead use bitwise "and" with the appropriate mask as shown above.
- LogRecordFlags_LOG_RECORD_FLAGS_DO_NOT_USE LogRecordFlags = 0
- // Bits 0-7 are used for trace flags.
- LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK LogRecordFlags = 255
-)
-
-var LogRecordFlags_name = map[int32]string{
- 0: "LOG_RECORD_FLAGS_DO_NOT_USE",
- 255: "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK",
-}
-
-var LogRecordFlags_value = map[string]int32{
- "LOG_RECORD_FLAGS_DO_NOT_USE": 0,
- "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK": 255,
-}
-
-func (x LogRecordFlags) String() string {
- return proto.EnumName(LogRecordFlags_name, int32(x))
-}
-
-func (LogRecordFlags) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{1}
-}
-
-// LogsData represents the logs data that can be stored in a persistent storage,
-// OR can be embedded by other protocols that transfer OTLP logs data but do not
-// implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type LogsData struct {
- // An array of ResourceLogs.
- // For data coming from a single resource this array will typically contain
- // one element. Intermediary nodes that receive data from multiple origins
- // typically batch the data before forwarding further and in that case this
- // array will contain multiple elements.
- ResourceLogs []*ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
-}
-
-func (m *LogsData) Reset() { *m = LogsData{} }
-func (m *LogsData) String() string { return proto.CompactTextString(m) }
-func (*LogsData) ProtoMessage() {}
-func (*LogsData) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{0}
-}
-func (m *LogsData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LogsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LogsData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LogsData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogsData.Merge(m, src)
-}
-func (m *LogsData) XXX_Size() int {
- return m.Size()
-}
-func (m *LogsData) XXX_DiscardUnknown() {
- xxx_messageInfo_LogsData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogsData proto.InternalMessageInfo
-
-func (m *LogsData) GetResourceLogs() []*ResourceLogs {
- if m != nil {
- return m.ResourceLogs
- }
- return nil
-}
-
-// A collection of ScopeLogs from a Resource.
-type ResourceLogs struct {
- DeprecatedScopeLogs []*ScopeLogs `protobuf:"bytes,1000,rep,name=deprecated_scope_logs,json=deprecatedScopeLogs,proto3" json:"deprecated_scope_logs,omitempty"`
- // The resource for the logs in this message.
- // If this field is not set then resource info is unknown.
- Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of ScopeLogs that originate from a resource.
- ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_logs" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceLogs) Reset() { *m = ResourceLogs{} }
-func (m *ResourceLogs) String() string { return proto.CompactTextString(m) }
-func (*ResourceLogs) ProtoMessage() {}
-func (*ResourceLogs) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{1}
-}
-func (m *ResourceLogs) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceLogs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceLogs.Merge(m, src)
-}
-func (m *ResourceLogs) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceLogs) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceLogs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo
-
-func (m *ResourceLogs) GetDeprecatedScopeLogs() []*ScopeLogs {
- if m != nil {
- return m.DeprecatedScopeLogs
- }
- return nil
-}
-
-func (m *ResourceLogs) GetResource() v1.Resource {
- if m != nil {
- return m.Resource
- }
- return v1.Resource{}
-}
-
-func (m *ResourceLogs) GetScopeLogs() []*ScopeLogs {
- if m != nil {
- return m.ScopeLogs
- }
- return nil
-}
-
-func (m *ResourceLogs) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Logs produced by a Scope.
-type ScopeLogs struct {
- // The instrumentation scope information for the logs in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of log records.
- LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the log data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all logs in the "logs" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeLogs) Reset() { *m = ScopeLogs{} }
-func (m *ScopeLogs) String() string { return proto.CompactTextString(m) }
-func (*ScopeLogs) ProtoMessage() {}
-func (*ScopeLogs) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{2}
-}
-func (m *ScopeLogs) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeLogs.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeLogs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeLogs.Merge(m, src)
-}
-func (m *ScopeLogs) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeLogs) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeLogs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeLogs proto.InternalMessageInfo
-
-func (m *ScopeLogs) GetScope() v11.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v11.InstrumentationScope{}
-}
-
-func (m *ScopeLogs) GetLogRecords() []*LogRecord {
- if m != nil {
- return m.LogRecords
- }
- return nil
-}
-
-func (m *ScopeLogs) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A log record according to OpenTelemetry Log Data Model:
-// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md
-type LogRecord struct {
- // time_unix_nano is the time when the event occurred.
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- // Value of 0 indicates unknown or missing timestamp.
- TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // Time when the event was observed by the collection system.
- // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK)
- // this timestamp is typically set at the generation time and is equal to Timestamp.
- // For events originating externally and collected by OpenTelemetry (e.g. using
- // Collector) this is the time when OpenTelemetry's code observed the event measured
- // by the clock of the OpenTelemetry code. This field MUST be set once the event is
- // observed by OpenTelemetry.
- //
- // For converting OpenTelemetry log data to formats that support only one timestamp or
- // when receiving OpenTelemetry log data by recipients that support only one timestamp
- // internally the following logic is recommended:
- // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- // Value of 0 indicates unknown or missing timestamp.
- ObservedTimeUnixNano uint64 `protobuf:"fixed64,11,opt,name=observed_time_unix_nano,json=observedTimeUnixNano,proto3" json:"observed_time_unix_nano,omitempty"`
- // Numerical value of the severity, normalized to values described in Log Data Model.
- // [Optional].
- SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"`
- // The severity text (also known as log level). The original string representation as
- // it is known at the source. [Optional].
- SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"`
- // A value containing the body of the log record. Can be for example a human-readable
- // string message (including multi-line) describing the event in a free form or it can
- // be a structured data composed of arrays and maps of other values. [Optional].
- Body v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body"`
- // Additional attributes that describe the specific event occurrence. [Optional].
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes"`
- DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Flags, a bit field. 8 least significant bits are the trace flags as
- // defined in W3C Trace Context specification. 24 most significant bits are reserved
- // and must be set to 0. Readers must not assume that 24 most significant bits
- // will be zero and must correctly mask the bits when reading 8-bit trace flag (use
- // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional].
- Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"`
- // A unique identifier for a trace. All logs from the same trace share
- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
- // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is optional.
- //
- // The receivers SHOULD assume that the log record is not associated with a
- // trace if any of the following is true:
- // - the field is not present,
- // - the field contains an invalid value.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for a span within a trace, assigned when the span
- // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
- // other than 8 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is optional. If the sender specifies a valid span_id then it SHOULD also
- // specify a valid trace_id.
- //
- // The receivers SHOULD assume that the log record is not associated with a
- // span if any of the following is true:
- // - the field is not present,
- // - the field contains an invalid value.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // A unique identifier of event category/type.
- // All events with the same event_name are expected to conform to the same
- // schema for both their attributes and their body.
- //
- // Recommended to be fully qualified and short (no longer than 256 characters).
- //
- // Presence of event_name on the log record identifies this record
- // as an event.
- //
- // [Optional].
- EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"`
-}
-
-func (m *LogRecord) Reset() { *m = LogRecord{} }
-func (m *LogRecord) String() string { return proto.CompactTextString(m) }
-func (*LogRecord) ProtoMessage() {}
-func (*LogRecord) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{3}
-}
-func (m *LogRecord) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LogRecord) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogRecord.Merge(m, src)
-}
-func (m *LogRecord) XXX_Size() int {
- return m.Size()
-}
-func (m *LogRecord) XXX_DiscardUnknown() {
- xxx_messageInfo_LogRecord.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogRecord proto.InternalMessageInfo
-
-func (m *LogRecord) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *LogRecord) GetObservedTimeUnixNano() uint64 {
- if m != nil {
- return m.ObservedTimeUnixNano
- }
- return 0
-}
-
-func (m *LogRecord) GetSeverityNumber() SeverityNumber {
- if m != nil {
- return m.SeverityNumber
- }
- return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
-}
-
-func (m *LogRecord) GetSeverityText() string {
- if m != nil {
- return m.SeverityText
- }
- return ""
-}
-
-func (m *LogRecord) GetBody() v11.AnyValue {
- if m != nil {
- return m.Body
- }
- return v11.AnyValue{}
-}
-
-func (m *LogRecord) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *LogRecord) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *LogRecord) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *LogRecord) GetEventName() string {
- if m != nil {
- return m.EventName
- }
- return ""
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value)
- proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value)
- proto.RegisterType((*LogsData)(nil), "opentelemetry.proto.logs.v1.LogsData")
- proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs")
- proto.RegisterType((*ScopeLogs)(nil), "opentelemetry.proto.logs.v1.ScopeLogs")
- proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e)
-}
-
-var fileDescriptor_d1c030a3ec7e961e = []byte{
- // 971 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0x41, 0x6f, 0xe2, 0x46,
- 0x1b, 0xc7, 0x71, 0x12, 0x02, 0x4c, 0x08, 0x3b, 0xef, 0x2c, 0xc9, 0xfa, 0x4d, 0x54, 0x42, 0xd3,
- 0x2a, 0xa5, 0xa9, 0x04, 0x0a, 0x50, 0x69, 0x7b, 0xab, 0x09, 0x26, 0xa2, 0x21, 0x10, 0x0d, 0x90,
- 0x2a, 0xdb, 0x4a, 0x96, 0xc1, 0x53, 0x6a, 0xc9, 0xcc, 0x58, 0xf6, 0x80, 0x92, 0x6f, 0xd1, 0x4f,
- 0xd0, 0x4b, 0x0f, 0x95, 0xfa, 0x35, 0xda, 0xc3, 0x1e, 0xf7, 0x58, 0xf5, 0xb0, 0xaa, 0x92, 0x4b,
- 0xbf, 0x45, 0xab, 0x19, 0x0c, 0x21, 0xa9, 0x9d, 0x34, 0x27, 0x66, 0x9e, 0xdf, 0xff, 0xf9, 0x3f,
- 0xcf, 0x78, 0xc6, 0x83, 0xc1, 0x01, 0x73, 0x09, 0xe5, 0xc4, 0x21, 0x63, 0xc2, 0xbd, 0xeb, 0x92,
- 0xeb, 0x31, 0xce, 0x4a, 0x0e, 0x1b, 0xf9, 0xa5, 0xe9, 0x91, 0xfc, 0x2d, 0xca, 0x10, 0xda, 0xbd,
- 0xa7, 0x9b, 0x05, 0x8b, 0x92, 0x4f, 0x8f, 0x76, 0xb2, 0x23, 0x36, 0x62, 0xb3, 0x54, 0x31, 0x9a,
- 0xd1, 0x9d, 0xc3, 0x30, 0xeb, 0x21, 0x1b, 0x8f, 0x19, 0x15, 0xe6, 0xb3, 0x51, 0xa0, 0x2d, 0x86,
- 0x69, 0x3d, 0xe2, 0xb3, 0x89, 0x37, 0x24, 0x42, 0x3d, 0x1f, 0xcf, 0xf4, 0xfb, 0x6f, 0x40, 0xb2,
- 0xc5, 0x46, 0x7e, 0xdd, 0xe4, 0x26, 0x6a, 0x83, 0xcd, 0x39, 0x35, 0x44, 0x47, 0xaa, 0x92, 0x5f,
- 0x2d, 0x6c, 0x94, 0x3f, 0x2d, 0x3e, 0xd2, 0x72, 0x11, 0x07, 0x19, 0xc2, 0x05, 0xa7, 0xbd, 0xa5,
- 0xd9, 0xfe, 0x8f, 0x2b, 0x20, 0xbd, 0x8c, 0xd1, 0x37, 0x60, 0xcb, 0x22, 0xae, 0x47, 0x86, 0x26,
- 0x27, 0x96, 0xe1, 0x0f, 0x99, 0x1b, 0x14, 0xfa, 0x2b, 0x21, 0x2b, 0x1d, 0x3c, 0x5a, 0xa9, 0x2b,
- 0xf4, 0xb2, 0xcc, 0xcb, 0x3b, 0x97, 0x45, 0x10, 0x9d, 0x82, 0xe4, 0xbc, 0xba, 0xaa, 0xe4, 0x95,
- 0xc8, 0xc6, 0x17, 0x0f, 0x60, 0xa9, 0xf9, 0xda, 0xda, 0xdb, 0xf7, 0x7b, 0x31, 0xbc, 0x30, 0x40,
- 0x3a, 0x00, 0x4b, 0xed, 0xad, 0x3c, 0xab, 0xbb, 0x94, 0xbf, 0xe8, 0xe9, 0x03, 0x61, 0xf3, 0x3d,
- 0x19, 0x9b, 0xc6, 0xc4, 0x73, 0xd4, 0xd5, 0xbc, 0x52, 0x48, 0x09, 0x2c, 0x22, 0x7d, 0xcf, 0xd9,
- 0xff, 0x4d, 0x01, 0xa9, 0xbb, 0x05, 0x74, 0x40, 0x5c, 0x66, 0x06, 0xdd, 0x57, 0x42, 0xcb, 0x05,
- 0x9b, 0x3d, 0x3d, 0x2a, 0x36, 0xa9, 0xcf, 0xbd, 0xc9, 0x98, 0x50, 0x6e, 0x72, 0x9b, 0x51, 0xe9,
- 0x13, 0xac, 0x63, 0xe6, 0x83, 0x4e, 0xc0, 0x86, 0xc3, 0x46, 0x86, 0x47, 0x86, 0xcc, 0xb3, 0xfe,
- 0xdb, 0x2a, 0x5a, 0x6c, 0x84, 0xa5, 0x1c, 0x03, 0x67, 0x3e, 0x7c, 0x72, 0x19, 0x3f, 0xc5, 0x41,
- 0x6a, 0x91, 0x88, 0x3e, 0x06, 0x19, 0x6e, 0x8f, 0x89, 0x31, 0xa1, 0xf6, 0x95, 0x41, 0x4d, 0xca,
- 0xe4, 0x7a, 0xd6, 0x71, 0x5a, 0x44, 0xfb, 0xd4, 0xbe, 0x6a, 0x9b, 0x94, 0xa1, 0xcf, 0xc1, 0x2b,
- 0x36, 0xf0, 0x89, 0x37, 0x25, 0x96, 0xf1, 0x40, 0xbe, 0x21, 0xe5, 0xd9, 0x39, 0xee, 0x2d, 0xa7,
- 0xf5, 0xc0, 0x0b, 0x9f, 0x4c, 0x89, 0x67, 0xf3, 0x6b, 0x83, 0x4e, 0xc6, 0x03, 0xe2, 0xa9, 0x2b,
- 0x79, 0xa5, 0x90, 0x29, 0x7f, 0xf6, 0xf8, 0xe6, 0x04, 0x39, 0x6d, 0x99, 0x82, 0x33, 0xfe, 0xbd,
- 0x39, 0xfa, 0x08, 0x6c, 0x2e, 0x5c, 0x39, 0xb9, 0xe2, 0xc1, 0x12, 0xd3, 0xf3, 0x60, 0x8f, 0x5c,
- 0x71, 0xa4, 0x81, 0xb5, 0x01, 0xb3, 0xae, 0xd5, 0xb8, 0xdc, 0x9d, 0x4f, 0x9e, 0xd8, 0x1d, 0x8d,
- 0x5e, 0x5f, 0x98, 0xce, 0x64, 0xbe, 0x23, 0x32, 0x15, 0x9d, 0x01, 0x60, 0x72, 0xee, 0xd9, 0x83,
- 0x09, 0x27, 0xbe, 0xba, 0x2e, 0xf7, 0xe3, 0x29, 0xa3, 0x53, 0x72, 0xcf, 0x68, 0xc9, 0x00, 0xbd,
- 0x06, 0xaa, 0xe5, 0x31, 0xd7, 0x25, 0x96, 0x71, 0x17, 0x35, 0x86, 0x6c, 0x42, 0xb9, 0x9a, 0xc8,
- 0x2b, 0x85, 0x4d, 0xbc, 0x1d, 0x70, 0x6d, 0x81, 0x8f, 0x05, 0x45, 0x59, 0x10, 0xff, 0xce, 0x31,
- 0x47, 0xbe, 0x9a, 0xcc, 0x2b, 0x85, 0x04, 0x9e, 0x4d, 0xd0, 0xb7, 0x20, 0xc9, 0x3d, 0x73, 0x48,
- 0x0c, 0xdb, 0x52, 0x53, 0x79, 0xa5, 0x90, 0xae, 0x69, 0xa2, 0xe6, 0x1f, 0xef, 0xf7, 0xbe, 0x18,
- 0xb1, 0x07, 0x6d, 0xda, 0xe2, 0x06, 0x72, 0x1c, 0x32, 0xe4, 0xcc, 0x2b, 0xb9, 0x96, 0xc9, 0xcd,
- 0x92, 0x4d, 0x39, 0xf1, 0xa8, 0xe9, 0x94, 0xc4, 0xac, 0xd8, 0x13, 0x4e, 0xcd, 0x3a, 0x4e, 0x48,
- 0xcb, 0xa6, 0x85, 0x2e, 0x41, 0xc2, 0x77, 0x4d, 0x2a, 0xcc, 0x81, 0x34, 0xff, 0x32, 0x30, 0x7f,
- 0xfd, 0x7c, 0xf3, 0xae, 0x6b, 0xd2, 0x66, 0x1d, 0xaf, 0x0b, 0xc3, 0xa6, 0x25, 0xce, 0x27, 0x99,
- 0x12, 0xca, 0x0d, 0x6a, 0x8e, 0x89, 0x9a, 0x9e, 0x9d, 0x4f, 0x19, 0x69, 0x9b, 0x63, 0xf2, 0xd5,
- 0x5a, 0x72, 0x0d, 0xc6, 0x0f, 0x7f, 0x8d, 0x83, 0xcc, 0xfd, 0x73, 0x80, 0xf6, 0xc0, 0x6e, 0x57,
- 0xbf, 0xd0, 0x71, 0xb3, 0x77, 0x69, 0xb4, 0xfb, 0x67, 0x35, 0x1d, 0x1b, 0xfd, 0x76, 0xf7, 0x5c,
- 0x3f, 0x6e, 0x36, 0x9a, 0x7a, 0x1d, 0xc6, 0xd0, 0xff, 0xc1, 0xd6, 0x43, 0x41, 0x0f, 0x6b, 0xc7,
- 0x3a, 0x54, 0xd0, 0x0e, 0xd8, 0x0e, 0x45, 0x65, 0xb8, 0x12, 0xc9, 0x2a, 0x70, 0x35, 0x92, 0x55,
- 0xe1, 0x5a, 0x58, 0xb9, 0xba, 0x5e, 0xeb, 0x9f, 0xc0, 0x78, 0x58, 0x9a, 0x44, 0x65, 0xb8, 0x1e,
- 0xc9, 0x2a, 0x30, 0x11, 0xc9, 0xaa, 0x30, 0x89, 0x54, 0x90, 0x7d, 0xc8, 0x9a, 0xed, 0x46, 0x07,
- 0xa6, 0xc2, 0x1a, 0x11, 0xa4, 0x0c, 0x41, 0x14, 0xaa, 0xc0, 0x8d, 0x28, 0x54, 0x85, 0xe9, 0xb0,
- 0x52, 0x5f, 0x6b, 0xb8, 0x0d, 0x37, 0xc3, 0x92, 0x04, 0x29, 0xc3, 0x4c, 0x14, 0xaa, 0xc0, 0x17,
- 0x51, 0xa8, 0x0a, 0x61, 0x18, 0xd2, 0x31, 0xee, 0x60, 0xf8, 0xbf, 0xb0, 0x87, 0x21, 0x51, 0x19,
- 0xa2, 0x48, 0x56, 0x81, 0x2f, 0x23, 0x59, 0x15, 0x66, 0xc3, 0xca, 0x35, 0xb4, 0x9e, 0xd6, 0x82,
- 0x5b, 0x61, 0x69, 0x12, 0x95, 0xe1, 0x76, 0x24, 0xab, 0xc0, 0x57, 0x91, 0xac, 0x0a, 0xd5, 0xc3,
- 0x4b, 0x90, 0x59, 0x5c, 0xb5, 0x0d, 0xf9, 0xd6, 0xee, 0x81, 0xdd, 0x56, 0xe7, 0xc4, 0xc0, 0xfa,
- 0x71, 0x07, 0xd7, 0x8d, 0x46, 0x4b, 0x3b, 0xe9, 0x1a, 0xf5, 0x8e, 0xd1, 0xee, 0xf4, 0x8c, 0x7e,
- 0x57, 0x87, 0x31, 0x74, 0x00, 0x3e, 0xfc, 0x97, 0x40, 0x1e, 0xb9, 0x60, 0x7c, 0xa6, 0x75, 0x4f,
- 0xe1, 0xdf, 0x4a, 0xed, 0x67, 0xe5, 0xed, 0x4d, 0x4e, 0x79, 0x77, 0x93, 0x53, 0xfe, 0xbc, 0xc9,
- 0x29, 0x3f, 0xdc, 0xe6, 0x62, 0xef, 0x6e, 0x73, 0xb1, 0xdf, 0x6f, 0x73, 0x31, 0x90, 0xb3, 0xd9,
- 0x63, 0xf7, 0x6b, 0x4d, 0x5c, 0xff, 0xfe, 0xb9, 0x08, 0x9d, 0x2b, 0x6f, 0x6a, 0xcf, 0x7e, 0x9f,
- 0x67, 0x9f, 0x29, 0x23, 0x42, 0xe7, 0x1f, 0x4c, 0xbf, 0xac, 0xec, 0x76, 0x5c, 0x42, 0x7b, 0x0b,
- 0x07, 0xe9, 0x2d, 0xfe, 0x9d, 0xfc, 0xe2, 0xc5, 0xd1, 0x60, 0x5d, 0xea, 0x2b, 0xff, 0x04, 0x00,
- 0x00, 0xff, 0xff, 0xc9, 0xbc, 0x36, 0x44, 0x74, 0x09, 0x00, 0x00,
-}
-
-func (m *LogsData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LogsData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LogsData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceLogs) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceLogs) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DeprecatedScopeLogs) > 0 {
- for iNdEx := len(m.DeprecatedScopeLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DeprecatedScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeLogs) > 0 {
- for iNdEx := len(m.ScopeLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ScopeLogs) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeLogs) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.LogRecords) > 0 {
- for iNdEx := len(m.LogRecords) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.LogRecords[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *LogRecord) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LogRecord) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.EventName) > 0 {
- i -= len(m.EventName)
- copy(dAtA[i:], m.EventName)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.EventName)))
- i--
- dAtA[i] = 0x62
- }
- if m.ObservedTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ObservedTimeUnixNano))
- i--
- dAtA[i] = 0x59
- }
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- if m.Flags != 0 {
- i -= 4
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
- i--
- dAtA[i] = 0x45
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintLogs(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x38
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- {
- size, err := m.Body.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- if len(m.SeverityText) > 0 {
- i -= len(m.SeverityText)
- copy(dAtA[i:], m.SeverityText)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.SeverityText)))
- i--
- dAtA[i] = 0x1a
- }
- if m.SeverityNumber != 0 {
- i = encodeVarintLogs(dAtA, i, uint64(m.SeverityNumber))
- i--
- dAtA[i] = 0x10
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x9
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintLogs(dAtA []byte, offset int, v uint64) int {
- offset -= sovLogs(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *LogsData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for _, e := range m.ResourceLogs {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceLogs) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovLogs(uint64(l))
- if len(m.ScopeLogs) > 0 {
- for _, e := range m.ScopeLogs {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- if len(m.DeprecatedScopeLogs) > 0 {
- for _, e := range m.DeprecatedScopeLogs {
- l = e.Size()
- n += 2 + l + sovLogs(uint64(l))
- }
- }
- return n
-}
-
-func (m *ScopeLogs) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovLogs(uint64(l))
- if len(m.LogRecords) > 0 {
- for _, e := range m.LogRecords {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- return n
-}
-
-func (m *LogRecord) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.SeverityNumber != 0 {
- n += 1 + sovLogs(uint64(m.SeverityNumber))
- }
- l = len(m.SeverityText)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- l = m.Body.Size()
- n += 1 + l + sovLogs(uint64(l))
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovLogs(uint64(m.DroppedAttributesCount))
- }
- if m.Flags != 0 {
- n += 5
- }
- l = m.TraceId.Size()
- n += 1 + l + sovLogs(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovLogs(uint64(l))
- if m.ObservedTimeUnixNano != 0 {
- n += 9
- }
- l = len(m.EventName)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- return n
-}
-
-func sovLogs(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozLogs(x uint64) (n int) {
- return sovLogs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *LogsData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LogsData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LogsData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceLogs = append(m.ResourceLogs, &ResourceLogs{})
- if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceLogs) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceLogs: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceLogs: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeLogs = append(m.ScopeLogs, &ScopeLogs{})
- if err := m.ScopeLogs[len(m.ScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeprecatedScopeLogs = append(m.DeprecatedScopeLogs, &ScopeLogs{})
- if err := m.DeprecatedScopeLogs[len(m.DeprecatedScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeLogs) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeLogs: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeLogs: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LogRecords = append(m.LogRecords, &LogRecord{})
- if err := m.LogRecords[len(m.LogRecords)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LogRecord) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LogRecord: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LogRecord: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType)
- }
- m.SeverityNumber = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SeverityNumber |= SeverityNumber(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SeverityText = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 5 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- if (iNdEx + 4) > l {
- return io.ErrUnexpectedEOF
- }
- m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 11:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeUnixNano", wireType)
- }
- m.ObservedTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.ObservedTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EventName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipLogs(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthLogs
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupLogs
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLogs
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthLogs = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowLogs = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupLogs = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go
deleted file mode 100644
index 2371096c7..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go
+++ /dev/null
@@ -1,6655 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/metrics/v1/metrics.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// AggregationTemporality defines how a metric aggregator reports aggregated
-// values. It describes how those values relate to the time interval over
-// which they are aggregated.
-type AggregationTemporality int32
-
-const (
- // UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
- AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
- // DELTA is an AggregationTemporality for a metric aggregator which reports
- // changes since last report time. Successive metrics contain aggregation of
- // values from continuous and non-overlapping intervals.
- //
- // The values for a DELTA metric are based only on the time interval
- // associated with one measurement cycle. There is no dependency on
- // previous measurements like is the case for CUMULATIVE metrics.
- //
- // For example, consider a system measuring the number of requests that
- // it receives and reports the sum of these requests every second as a
- // DELTA metric:
- //
- // 1. The system starts receiving at time=t_0.
- // 2. A request is received, the system measures 1 request.
- // 3. A request is received, the system measures 1 request.
- // 4. A request is received, the system measures 1 request.
- // 5. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+1 with a value of 3.
- // 6. A request is received, the system measures 1 request.
- // 7. A request is received, the system measures 1 request.
- // 8. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0+1 to
- // t_0+2 with a value of 2.
- AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
- // CUMULATIVE is an AggregationTemporality for a metric aggregator which
- // reports changes since a fixed start time. This means that current values
- // of a CUMULATIVE metric depend on all previous measurements since the
- // start time. Because of this, the sender is required to retain this state
- // in some form. If this state is lost or invalidated, the CUMULATIVE metric
- // values MUST be reset and a new fixed start time following the last
- // reported measurement time sent MUST be used.
- //
- // For example, consider a system measuring the number of requests that
- // it receives and reports the sum of these requests every second as a
- // CUMULATIVE metric:
- //
- // 1. The system starts receiving at time=t_0.
- // 2. A request is received, the system measures 1 request.
- // 3. A request is received, the system measures 1 request.
- // 4. A request is received, the system measures 1 request.
- // 5. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+1 with a value of 3.
- // 6. A request is received, the system measures 1 request.
- // 7. A request is received, the system measures 1 request.
- // 8. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+2 with a value of 5.
- // 9. The system experiences a fault and loses state.
- // 10. The system recovers and resumes receiving at time=t_1.
- // 11. A request is received, the system measures 1 request.
- // 12. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_1 to
- // t_0+1 with a value of 1.
- //
- // Note: Even though, when reporting changes since last report time, using
- // CUMULATIVE is valid, it is not recommended. This may cause problems for
- // systems that do not use start_time to determine when the aggregation
- // value was reset (e.g. Prometheus).
- AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
-)
-
-var AggregationTemporality_name = map[int32]string{
- 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
- 1: "AGGREGATION_TEMPORALITY_DELTA",
- 2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
-}
-
-var AggregationTemporality_value = map[string]int32{
- "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
- "AGGREGATION_TEMPORALITY_DELTA": 1,
- "AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
-}
-
-func (x AggregationTemporality) String() string {
- return proto.EnumName(AggregationTemporality_name, int32(x))
-}
-
-func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{0}
-}
-
-// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
-// bit-field representing 32 distinct boolean flags. Each flag defined in this
-// enum is a bit-mask. To test the presence of a single flag in the flags of
-// a data point, for example, use an expression like:
-//
-// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
-type DataPointFlags int32
-
-const (
- // The zero value for the enum. Should not be used for comparisons.
- // Instead use bitwise "and" with the appropriate mask as shown above.
- DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0
- // This DataPoint is valid but has no recorded value. This value
- // SHOULD be used to reflect explicitly missing data in a series, as
- // for an equivalent to the Prometheus "staleness marker".
- DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1
-)
-
-var DataPointFlags_name = map[int32]string{
- 0: "DATA_POINT_FLAGS_DO_NOT_USE",
- 1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK",
-}
-
-var DataPointFlags_value = map[string]int32{
- "DATA_POINT_FLAGS_DO_NOT_USE": 0,
- "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1,
-}
-
-func (x DataPointFlags) String() string {
- return proto.EnumName(DataPointFlags_name, int32(x))
-}
-
-func (DataPointFlags) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{1}
-}
-
-// MetricsData represents the metrics data that can be stored in a persistent
-// storage, OR can be embedded by other protocols that transfer OTLP metrics
-// data but do not implement the OTLP protocol.
-//
-// MetricsData
-// └─── ResourceMetrics
-//
-// ├── Resource
-// ├── SchemaURL
-// └── ScopeMetrics
-// ├── Scope
-// ├── SchemaURL
-// └── Metric
-// ├── Name
-// ├── Description
-// ├── Unit
-// └── data
-// ├── Gauge
-// ├── Sum
-// ├── Histogram
-// ├── ExponentialHistogram
-// └── Summary
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type MetricsData struct {
- // An array of ResourceMetrics.
- // For data coming from a single resource this array will typically contain
- // one element. Intermediary nodes that receive data from multiple origins
- // typically batch the data before forwarding further and in that case this
- // array will contain multiple elements.
- ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
-}
-
-func (m *MetricsData) Reset() { *m = MetricsData{} }
-func (m *MetricsData) String() string { return proto.CompactTextString(m) }
-func (*MetricsData) ProtoMessage() {}
-func (*MetricsData) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{0}
-}
-func (m *MetricsData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MetricsData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MetricsData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricsData.Merge(m, src)
-}
-func (m *MetricsData) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricsData) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricsData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricsData proto.InternalMessageInfo
-
-func (m *MetricsData) GetResourceMetrics() []*ResourceMetrics {
- if m != nil {
- return m.ResourceMetrics
- }
- return nil
-}
-
-// A collection of ScopeMetrics from a Resource.
-type ResourceMetrics struct {
- DeprecatedScopeMetrics []*ScopeMetrics `protobuf:"bytes,1000,rep,name=deprecated_scope_metrics,json=deprecatedScopeMetrics,proto3" json:"deprecated_scope_metrics,omitempty"`
- // The resource for the metrics in this message.
- // If this field is not set then no resource info is known.
- Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of metrics that originate from a resource.
- ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_metrics" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} }
-func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) }
-func (*ResourceMetrics) ProtoMessage() {}
-func (*ResourceMetrics) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{1}
-}
-func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceMetrics) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetrics.Merge(m, src)
-}
-func (m *ResourceMetrics) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetrics) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetrics.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo
-
-func (m *ResourceMetrics) GetDeprecatedScopeMetrics() []*ScopeMetrics {
- if m != nil {
- return m.DeprecatedScopeMetrics
- }
- return nil
-}
-
-func (m *ResourceMetrics) GetResource() v1.Resource {
- if m != nil {
- return m.Resource
- }
- return v1.Resource{}
-}
-
-func (m *ResourceMetrics) GetScopeMetrics() []*ScopeMetrics {
- if m != nil {
- return m.ScopeMetrics
- }
- return nil
-}
-
-func (m *ResourceMetrics) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Metrics produced by an Scope.
-type ScopeMetrics struct {
- // The instrumentation scope information for the metrics in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of metrics that originate from an instrumentation library.
- Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the metric data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all metrics in the "metrics" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeMetrics) Reset() { *m = ScopeMetrics{} }
-func (m *ScopeMetrics) String() string { return proto.CompactTextString(m) }
-func (*ScopeMetrics) ProtoMessage() {}
-func (*ScopeMetrics) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{2}
-}
-func (m *ScopeMetrics) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeMetrics.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeMetrics) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeMetrics.Merge(m, src)
-}
-func (m *ScopeMetrics) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeMetrics) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeMetrics.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeMetrics proto.InternalMessageInfo
-
-func (m *ScopeMetrics) GetScope() v11.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v11.InstrumentationScope{}
-}
-
-func (m *ScopeMetrics) GetMetrics() []*Metric {
- if m != nil {
- return m.Metrics
- }
- return nil
-}
-
-func (m *ScopeMetrics) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// Defines a Metric which has one or more timeseries. The following is a
-// brief summary of the Metric data model. For more details, see:
-//
-// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
-//
-// The data model and relation between entities is shown in the
-// diagram below. Here, "DataPoint" is the term used to refer to any
-// one of the specific data point value types, and "points" is the term used
-// to refer to any one of the lists of points contained in the Metric.
-//
-// - Metric is composed of a metadata and data.
-//
-// - Metadata part contains a name, description, unit.
-//
-// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
-//
-// - DataPoint contains timestamps, attributes, and one of the possible value type
-// fields.
-//
-// Metric
-// +------------+
-// |name |
-// |description |
-// |unit | +------------------------------------+
-// |data |---> |Gauge, Sum, Histogram, Summary, ... |
-// +------------+ +------------------------------------+
-//
-// Data [One of Gauge, Sum, Histogram, Summary, ...]
-// +-----------+
-// |... | // Metadata about the Data.
-// |points |--+
-// +-----------+ |
-// | +---------------------------+
-// | |DataPoint 1 |
-// v |+------+------+ +------+ |
-// +-----+ ||label |label |...|label | |
-// | 1 |-->||value1|value2|...|valueN| |
-// +-----+ |+------+------+ +------+ |
-// | . | |+-----+ |
-// | . | ||value| |
-// | . | |+-----+ |
-// | . | +---------------------------+
-// | . | .
-// | . | .
-// | . | .
-// | . | +---------------------------+
-// | . | |DataPoint M |
-// +-----+ |+------+------+ +------+ |
-// | M |-->||label |label |...|label | |
-// +-----+ ||value1|value2|...|valueN| |
-// |+------+------+ +------+ |
-// |+-----+ |
-// ||value| |
-// |+-----+ |
-// +---------------------------+
-//
-// Each distinct type of DataPoint represents the output of a specific
-// aggregation function, the result of applying the DataPoint's
-// associated function of to one or more measurements.
-//
-// All DataPoint types have three common fields:
-// - Attributes includes key-value pairs associated with the data point
-// - TimeUnixNano is required, set to the end time of the aggregation
-// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
-// having an AggregationTemporality field, as discussed below.
-//
-// Both TimeUnixNano and StartTimeUnixNano values are expressed as
-// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
-//
-// # TimeUnixNano
-//
-// This field is required, having consistent interpretation across
-// DataPoint types. TimeUnixNano is the moment corresponding to when
-// the data point's aggregate value was captured.
-//
-// Data points with the 0 value for TimeUnixNano SHOULD be rejected
-// by consumers.
-//
-// # StartTimeUnixNano
-//
-// StartTimeUnixNano in general allows detecting when a sequence of
-// observations is unbroken. This field indicates to consumers the
-// start time for points with cumulative and delta
-// AggregationTemporality, and it should be included whenever possible
-// to support correct rate calculation. Although it may be omitted
-// when the start time is truly unknown, setting StartTimeUnixNano is
-// strongly encouraged.
-type Metric struct {
- // name of the metric.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // description of the metric, which can be used in documentation.
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- // unit in which the metric value is reported. Follows the format
- // described by https://unitsofmeasure.org/ucum.html.
- Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
- // Data determines the aggregation type (if any) of the metric, what is the
- // reported value type for the data points, as well as the relatationship to
- // the time interval over which they are reported.
- //
- // Types that are valid to be assigned to Data:
- // *Metric_Gauge
- // *Metric_Sum
- // *Metric_Histogram
- // *Metric_ExponentialHistogram
- // *Metric_Summary
- Data isMetric_Data `protobuf_oneof:"data"`
- // Additional metadata attributes that describe the metric. [Optional].
- // Attributes are non-identifying.
- // Consumers SHOULD NOT need to be aware of these attributes.
- // These attributes MAY be used to encode information allowing
- // for lossless roundtrip translation to / from another data model.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Metadata []v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{3}
-}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return m.Size()
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metric proto.InternalMessageInfo
-
-type isMetric_Data interface {
- isMetric_Data()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Metric_Gauge struct {
- Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof" json:"gauge,omitempty"`
-}
-type Metric_Sum struct {
- Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
-}
-type Metric_Histogram struct {
- Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof" json:"histogram,omitempty"`
-}
-type Metric_ExponentialHistogram struct {
- ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof" json:"exponential_histogram,omitempty"`
-}
-type Metric_Summary struct {
- Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof" json:"summary,omitempty"`
-}
-
-func (*Metric_Gauge) isMetric_Data() {}
-func (*Metric_Sum) isMetric_Data() {}
-func (*Metric_Histogram) isMetric_Data() {}
-func (*Metric_ExponentialHistogram) isMetric_Data() {}
-func (*Metric_Summary) isMetric_Data() {}
-
-func (m *Metric) GetData() isMetric_Data {
- if m != nil {
- return m.Data
- }
- return nil
-}
-
-func (m *Metric) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Metric) GetDescription() string {
- if m != nil {
- return m.Description
- }
- return ""
-}
-
-func (m *Metric) GetUnit() string {
- if m != nil {
- return m.Unit
- }
- return ""
-}
-
-func (m *Metric) GetGauge() *Gauge {
- if x, ok := m.GetData().(*Metric_Gauge); ok {
- return x.Gauge
- }
- return nil
-}
-
-func (m *Metric) GetSum() *Sum {
- if x, ok := m.GetData().(*Metric_Sum); ok {
- return x.Sum
- }
- return nil
-}
-
-func (m *Metric) GetHistogram() *Histogram {
- if x, ok := m.GetData().(*Metric_Histogram); ok {
- return x.Histogram
- }
- return nil
-}
-
-func (m *Metric) GetExponentialHistogram() *ExponentialHistogram {
- if x, ok := m.GetData().(*Metric_ExponentialHistogram); ok {
- return x.ExponentialHistogram
- }
- return nil
-}
-
-func (m *Metric) GetSummary() *Summary {
- if x, ok := m.GetData().(*Metric_Summary); ok {
- return x.Summary
- }
- return nil
-}
-
-func (m *Metric) GetMetadata() []v11.KeyValue {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Metric) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Metric_Gauge)(nil),
- (*Metric_Sum)(nil),
- (*Metric_Histogram)(nil),
- (*Metric_ExponentialHistogram)(nil),
- (*Metric_Summary)(nil),
- }
-}
-
-// Gauge represents the type of a scalar metric that always exports the
-// "current value" for every data point. It should be used for an "unknown"
-// aggregation.
-//
-// A Gauge does not support different aggregation temporalities. Given the
-// aggregation is unknown, points cannot be combined using the same
-// aggregation, regardless of aggregation temporalities. Therefore,
-// AggregationTemporality is not included. Consequently, this also means
-// "StartTimeUnixNano" is ignored for all data points.
-type Gauge struct {
- DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
-}
-
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{4}
-}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
-}
-func (m *Gauge) XXX_Size() int {
- return m.Size()
-}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
-
-func (m *Gauge) GetDataPoints() []*NumberDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-// Sum represents the type of a scalar metric that is calculated as a sum of all
-// reported measurements over a time interval.
-type Sum struct {
- DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
- // If "true" means that the sum is monotonic.
- IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
-}
-
-func (m *Sum) Reset() { *m = Sum{} }
-func (m *Sum) String() string { return proto.CompactTextString(m) }
-func (*Sum) ProtoMessage() {}
-func (*Sum) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{5}
-}
-func (m *Sum) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Sum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Sum.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Sum) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Sum.Merge(m, src)
-}
-func (m *Sum) XXX_Size() int {
- return m.Size()
-}
-func (m *Sum) XXX_DiscardUnknown() {
- xxx_messageInfo_Sum.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Sum proto.InternalMessageInfo
-
-func (m *Sum) GetDataPoints() []*NumberDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-func (m *Sum) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-func (m *Sum) GetIsMonotonic() bool {
- if m != nil {
- return m.IsMonotonic
- }
- return false
-}
-
-// Histogram represents the type of a metric that is calculated by aggregating
-// as a Histogram of all reported measurements over a time interval.
-type Histogram struct {
- DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
-}
-
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{6}
-}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return m.Size()
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
-
-func (m *Histogram) GetDataPoints() []*HistogramDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-func (m *Histogram) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-// ExponentialHistogram represents the type of a metric that is calculated by aggregating
-// as a ExponentialHistogram of all reported double measurements over a time interval.
-type ExponentialHistogram struct {
- DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
-}
-
-func (m *ExponentialHistogram) Reset() { *m = ExponentialHistogram{} }
-func (m *ExponentialHistogram) String() string { return proto.CompactTextString(m) }
-func (*ExponentialHistogram) ProtoMessage() {}
-func (*ExponentialHistogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{7}
-}
-func (m *ExponentialHistogram) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExponentialHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExponentialHistogram.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExponentialHistogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExponentialHistogram.Merge(m, src)
-}
-func (m *ExponentialHistogram) XXX_Size() int {
- return m.Size()
-}
-func (m *ExponentialHistogram) XXX_DiscardUnknown() {
- xxx_messageInfo_ExponentialHistogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExponentialHistogram proto.InternalMessageInfo
-
-func (m *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-// Summary metric data are used to convey quantile summaries,
-// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
-// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
-// data type. These data points cannot always be merged in a meaningful way.
-// While they can be useful in some applications, histogram data points are
-// recommended for new applications.
-// Summary metrics do not have an aggregation temporality field. This is
-// because the count and sum fields of a SummaryDataPoint are assumed to be
-// cumulative values.
-type Summary struct {
- DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
-}
-
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{8}
-}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
-}
-func (m *Summary) XXX_Size() int {
- return m.Size()
-}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Summary proto.InternalMessageInfo
-
-func (m *Summary) GetDataPoints() []*SummaryDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-// NumberDataPoint is a single data point in a timeseries that describes the
-// time-varying scalar value of a metric.
-type NumberDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries from
- // where this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // The value itself. A point is considered invalid when one of the recognized
- // value fields is not present inside this oneof.
- //
- // Types that are valid to be assigned to Value:
- // *NumberDataPoint_AsDouble
- // *NumberDataPoint_AsInt
- Value isNumberDataPoint_Value `protobuf_oneof:"value"`
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point
- Exemplars []Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
-}
-
-func (m *NumberDataPoint) Reset() { *m = NumberDataPoint{} }
-func (m *NumberDataPoint) String() string { return proto.CompactTextString(m) }
-func (*NumberDataPoint) ProtoMessage() {}
-func (*NumberDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{9}
-}
-func (m *NumberDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NumberDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_NumberDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *NumberDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NumberDataPoint.Merge(m, src)
-}
-func (m *NumberDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *NumberDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_NumberDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NumberDataPoint proto.InternalMessageInfo
-
-type isNumberDataPoint_Value interface {
- isNumberDataPoint_Value()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type NumberDataPoint_AsDouble struct {
- AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"`
-}
-type NumberDataPoint_AsInt struct {
- AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"`
-}
-
-func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
-func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
-
-func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *NumberDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *NumberDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetAsDouble() float64 {
- if x, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok {
- return x.AsDouble
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetAsInt() int64 {
- if x, ok := m.GetValue().(*NumberDataPoint_AsInt); ok {
- return x.AsInt
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetExemplars() []Exemplar {
- if m != nil {
- return m.Exemplars
- }
- return nil
-}
-
-func (m *NumberDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*NumberDataPoint) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*NumberDataPoint_AsDouble)(nil),
- (*NumberDataPoint_AsInt)(nil),
- }
-}
-
-// HistogramDataPoint is a single data point in a timeseries that describes the
-// time-varying values of a Histogram. A Histogram contains summary statistics
-// for a population of values, it may optionally contain the distribution of
-// those values across a set of buckets.
-//
-// If the histogram contains the distribution of values, then both
-// "explicit_bounds" and "bucket counts" fields must be defined.
-// If the histogram does not contain the distribution of values, then both
-// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
-// "sum" are known.
-type HistogramDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries from
- // where this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // count is the number of values in the population. Must be non-negative. This
- // value must be equal to the sum of the "count" fields in buckets if a
- // histogram is provided.
- Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
- //
- // Types that are valid to be assigned to Sum_:
- // *HistogramDataPoint_Sum
- Sum_ isHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"`
- // bucket_counts is an optional field contains the count values of histogram
- // for each bucket.
- //
- // The sum of the bucket_counts must equal the value in the count field.
- //
- // The number of elements in bucket_counts array must be by one greater than
- // the number of elements in explicit_bounds array. The exception to this rule
- // is when the length of bucket_counts is 0, then the length of explicit_bounds
- // must also be 0.
- BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
- // explicit_bounds specifies buckets with explicitly defined bounds for values.
- //
- // The boundaries for bucket at index i are:
- //
- // (-infinity, explicit_bounds[i]] for i == 0
- // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
- // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
- //
- // The values in the explicit_bounds array must be strictly increasing.
- //
- // Histogram buckets are inclusive of their upper boundary, except the last
- // bucket where the boundary is at infinity. This format is intentionally
- // compatible with the OpenMetrics histogram definition.
- //
- // If bucket_counts length is 0 then explicit_bounds length must also be 0,
- // otherwise the data point is invalid.
- ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point
- Exemplars []Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
- // min is the minimum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Min_:
- // *HistogramDataPoint_Min
- Min_ isHistogramDataPoint_Min_ `protobuf_oneof:"min_"`
- // max is the maximum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Max_:
- // *HistogramDataPoint_Max
- Max_ isHistogramDataPoint_Max_ `protobuf_oneof:"max_"`
-}
-
-func (m *HistogramDataPoint) Reset() { *m = HistogramDataPoint{} }
-func (m *HistogramDataPoint) String() string { return proto.CompactTextString(m) }
-func (*HistogramDataPoint) ProtoMessage() {}
-func (*HistogramDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{10}
-}
-func (m *HistogramDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HistogramDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HistogramDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HistogramDataPoint.Merge(m, src)
-}
-func (m *HistogramDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *HistogramDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_HistogramDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HistogramDataPoint proto.InternalMessageInfo
-
-type isHistogramDataPoint_Sum_ interface {
- isHistogramDataPoint_Sum_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isHistogramDataPoint_Min_ interface {
- isHistogramDataPoint_Min_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isHistogramDataPoint_Max_ interface {
- isHistogramDataPoint_Max_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type HistogramDataPoint_Sum struct {
- Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
-}
-type HistogramDataPoint_Min struct {
- Min float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
-}
-type HistogramDataPoint_Max struct {
- Max float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
-}
-
-func (*HistogramDataPoint_Sum) isHistogramDataPoint_Sum_() {}
-func (*HistogramDataPoint_Min) isHistogramDataPoint_Min_() {}
-func (*HistogramDataPoint_Max) isHistogramDataPoint_Max_() {}
-
-func (m *HistogramDataPoint) GetSum_() isHistogramDataPoint_Sum_ {
- if m != nil {
- return m.Sum_
- }
- return nil
-}
-func (m *HistogramDataPoint) GetMin_() isHistogramDataPoint_Min_ {
- if m != nil {
- return m.Min_
- }
- return nil
-}
-func (m *HistogramDataPoint) GetMax_() isHistogramDataPoint_Max_ {
- if m != nil {
- return m.Max_
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetCount() uint64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetSum() float64 {
- if x, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok {
- return x.Sum
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetBucketCounts() []uint64 {
- if m != nil {
- return m.BucketCounts
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetExplicitBounds() []float64 {
- if m != nil {
- return m.ExplicitBounds
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetExemplars() []Exemplar {
- if m != nil {
- return m.Exemplars
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetMin() float64 {
- if x, ok := m.GetMin_().(*HistogramDataPoint_Min); ok {
- return x.Min
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetMax() float64 {
- if x, ok := m.GetMax_().(*HistogramDataPoint_Max); ok {
- return x.Max
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*HistogramDataPoint) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*HistogramDataPoint_Sum)(nil),
- (*HistogramDataPoint_Min)(nil),
- (*HistogramDataPoint_Max)(nil),
- }
-}
-
-// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
-// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains
-// summary statistics for a population of values, it may optionally contain the
-// distribution of those values across a set of buckets.
-type ExponentialHistogramDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries from
- // where this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // count is the number of values in the population. Must be
- // non-negative. This value must be equal to the sum of the "bucket_counts"
- // values in the positive and negative Buckets plus the "zero_count" field.
- Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
- //
- // Types that are valid to be assigned to Sum_:
- // *ExponentialHistogramDataPoint_Sum
- Sum_ isExponentialHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"`
- // scale describes the resolution of the histogram. Boundaries are
- // located at powers of the base, where:
- //
- // base = (2^(2^-scale))
- //
- // The histogram bucket identified by `index`, a signed integer,
- // contains values that are greater than (base^index) and
- // less than or equal to (base^(index+1)).
- //
- // The positive and negative ranges of the histogram are expressed
- // separately. Negative values are mapped by their absolute value
- // into the negative range using the same scale as the positive range.
- //
- // scale is not restricted by the protocol, as the permissible
- // values depend on the range of the data.
- Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
- // zero_count is the count of values that are either exactly zero or
- // within the region considered zero by the instrumentation at the
- // tolerated degree of precision. This bucket stores values that
- // cannot be expressed using the standard exponential formula as
- // well as values that have been rounded to zero.
- //
- // Implementations MAY consider the zero bucket to have probability
- // mass equal to (zero_count / count).
- ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
- // positive carries the positive range of exponential bucket counts.
- Positive ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive"`
- // negative carries the negative range of exponential bucket counts.
- Negative ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point
- Exemplars []Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars"`
- // min is the minimum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Min_:
- // *ExponentialHistogramDataPoint_Min
- Min_ isExponentialHistogramDataPoint_Min_ `protobuf_oneof:"min_"`
- // max is the maximum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Max_:
- // *ExponentialHistogramDataPoint_Max
- Max_ isExponentialHistogramDataPoint_Max_ `protobuf_oneof:"max_"`
- // ZeroThreshold may be optionally set to convey the width of the zero
- // region. Where the zero region is defined as the closed interval
- // [-ZeroThreshold, ZeroThreshold].
- // When ZeroThreshold is 0, zero count bucket stores values that cannot be
- // expressed using the standard exponential formula as well as values that
- // have been rounded to zero.
- ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
-}
-
-func (m *ExponentialHistogramDataPoint) Reset() { *m = ExponentialHistogramDataPoint{} }
-func (m *ExponentialHistogramDataPoint) String() string { return proto.CompactTextString(m) }
-func (*ExponentialHistogramDataPoint) ProtoMessage() {}
-func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{11}
-}
-func (m *ExponentialHistogramDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExponentialHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExponentialHistogramDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExponentialHistogramDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExponentialHistogramDataPoint.Merge(m, src)
-}
-func (m *ExponentialHistogramDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *ExponentialHistogramDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_ExponentialHistogramDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExponentialHistogramDataPoint proto.InternalMessageInfo
-
-type isExponentialHistogramDataPoint_Sum_ interface {
- isExponentialHistogramDataPoint_Sum_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isExponentialHistogramDataPoint_Min_ interface {
- isExponentialHistogramDataPoint_Min_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isExponentialHistogramDataPoint_Max_ interface {
- isExponentialHistogramDataPoint_Max_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type ExponentialHistogramDataPoint_Sum struct {
- Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
-}
-type ExponentialHistogramDataPoint_Min struct {
- Min float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
-}
-type ExponentialHistogramDataPoint_Max struct {
- Max float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
-}
-
-func (*ExponentialHistogramDataPoint_Sum) isExponentialHistogramDataPoint_Sum_() {}
-func (*ExponentialHistogramDataPoint_Min) isExponentialHistogramDataPoint_Min_() {}
-func (*ExponentialHistogramDataPoint_Max) isExponentialHistogramDataPoint_Max_() {}
-
-func (m *ExponentialHistogramDataPoint) GetSum_() isExponentialHistogramDataPoint_Sum_ {
- if m != nil {
- return m.Sum_
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint) GetMin_() isExponentialHistogramDataPoint_Min_ {
- if m != nil {
- return m.Min_
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint) GetMax_() isExponentialHistogramDataPoint_Max_ {
- if m != nil {
- return m.Max_
- }
- return nil
-}
-
-func (m *ExponentialHistogramDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetCount() uint64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetSum() float64 {
- if x, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok {
- return x.Sum
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetScale() int32 {
- if m != nil {
- return m.Scale
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetZeroCount() uint64 {
- if m != nil {
- return m.ZeroCount
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetPositive() ExponentialHistogramDataPoint_Buckets {
- if m != nil {
- return m.Positive
- }
- return ExponentialHistogramDataPoint_Buckets{}
-}
-
-func (m *ExponentialHistogramDataPoint) GetNegative() ExponentialHistogramDataPoint_Buckets {
- if m != nil {
- return m.Negative
- }
- return ExponentialHistogramDataPoint_Buckets{}
-}
-
-func (m *ExponentialHistogramDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetExemplars() []Exemplar {
- if m != nil {
- return m.Exemplars
- }
- return nil
-}
-
-func (m *ExponentialHistogramDataPoint) GetMin() float64 {
- if x, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok {
- return x.Min
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetMax() float64 {
- if x, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok {
- return x.Max
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetZeroThreshold() float64 {
- if m != nil {
- return m.ZeroThreshold
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ExponentialHistogramDataPoint) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ExponentialHistogramDataPoint_Sum)(nil),
- (*ExponentialHistogramDataPoint_Min)(nil),
- (*ExponentialHistogramDataPoint_Max)(nil),
- }
-}
-
-// Buckets are a set of bucket counts, encoded in a contiguous array
-// of counts.
-type ExponentialHistogramDataPoint_Buckets struct {
- // Offset is the bucket index of the first entry in the bucket_counts array.
- //
- // Note: This uses a varint encoding as a simple form of compression.
- Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
- // bucket_counts is an array of count values, where bucket_counts[i] carries
- // the count of the bucket at index (offset+i). bucket_counts[i] is the count
- // of values greater than base^(offset+i) and less than or equal to
- // base^(offset+i+1).
- //
- // Note: By contrast, the explicit HistogramDataPoint uses
- // fixed64. This field is expected to have many buckets,
- // especially zeros, so uint64 has been selected to ensure
- // varint encoding.
- BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) Reset() { *m = ExponentialHistogramDataPoint_Buckets{} }
-func (m *ExponentialHistogramDataPoint_Buckets) String() string { return proto.CompactTextString(m) }
-func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {}
-func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{11, 0}
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Merge(m, src)
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Size() int {
- return m.Size()
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_DiscardUnknown() {
- xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExponentialHistogramDataPoint_Buckets proto.InternalMessageInfo
-
-func (m *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 {
- if m != nil {
- return m.Offset
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 {
- if m != nil {
- return m.BucketCounts
- }
- return nil
-}
-
-// SummaryDataPoint is a single data point in a timeseries that describes the
-// time-varying values of a Summary metric. The count and sum fields represent
-// cumulative values.
-type SummaryDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries from
- // where this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // count is the number of values in the population. Must be non-negative.
- Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary
- Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
- // (Optional) list of values at different quantiles of the distribution calculated
- // from the current snapshot. The quantiles must be strictly increasing.
- QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
-}
-
-func (m *SummaryDataPoint) Reset() { *m = SummaryDataPoint{} }
-func (m *SummaryDataPoint) String() string { return proto.CompactTextString(m) }
-func (*SummaryDataPoint) ProtoMessage() {}
-func (*SummaryDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{12}
-}
-func (m *SummaryDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SummaryDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SummaryDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SummaryDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryDataPoint.Merge(m, src)
-}
-func (m *SummaryDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *SummaryDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryDataPoint proto.InternalMessageInfo
-
-func (m *SummaryDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *SummaryDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetCount() uint64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetSum() float64 {
- if m != nil {
- return m.Sum
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile {
- if m != nil {
- return m.QuantileValues
- }
- return nil
-}
-
-func (m *SummaryDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-// Represents the value at a given quantile of a distribution.
-//
-// To record Min and Max values following conventions are used:
-// - The 1.0 quantile is equivalent to the maximum value observed.
-// - The 0.0 quantile is equivalent to the minimum value observed.
-//
-// See the following issue for more context:
-// https://github.com/open-telemetry/opentelemetry-proto/issues/125
-type SummaryDataPoint_ValueAtQuantile struct {
- // The quantile of a distribution. Must be in the interval
- // [0.0, 1.0].
- Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
- // The value at the given quantile of a distribution.
- //
- // Quantile values must NOT be negative.
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) Reset() { *m = SummaryDataPoint_ValueAtQuantile{} }
-func (m *SummaryDataPoint_ValueAtQuantile) String() string { return proto.CompactTextString(m) }
-func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {}
-func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{12, 0}
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Merge(m, src)
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Size() int {
- return m.Size()
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryDataPoint_ValueAtQuantile proto.InternalMessageInfo
-
-func (m *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 {
- if m != nil {
- return m.Quantile
- }
- return 0
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) GetValue() float64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-// A representation of an exemplar, which is a sample input measurement.
-// Exemplars also hold information about the environment when the measurement
-// was recorded, for example the span and trace ID of the active span when the
-// exemplar was recorded.
-type Exemplar struct {
- // The set of key/value pairs that were filtered out by the aggregator, but
- // recorded alongside the original measurement. Only key/value pairs that were
- // filtered out by the aggregator should be included
- FilteredAttributes []v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes"`
- // time_unix_nano is the exact time when this exemplar was recorded
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // The value of the measurement that was recorded. An exemplar is
- // considered invalid when one of the recognized value fields is not present
- // inside this oneof.
- //
- // Types that are valid to be assigned to Value:
- // *Exemplar_AsDouble
- // *Exemplar_AsInt
- Value isExemplar_Value `protobuf_oneof:"value"`
- // (Optional) Span ID of the exemplar trace.
- // span_id may be missing if the measurement is not recorded inside a trace
- // or if the trace is not sampled.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // (Optional) Trace ID of the exemplar trace.
- // trace_id may be missing if the measurement is not recorded inside a trace
- // or if the trace is not sampled.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
-}
-
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{13}
-}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
-}
-func (m *Exemplar) XXX_Size() int {
- return m.Size()
-}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
-
-type isExemplar_Value interface {
- isExemplar_Value()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Exemplar_AsDouble struct {
- AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"`
-}
-type Exemplar_AsInt struct {
- AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"`
-}
-
-func (*Exemplar_AsDouble) isExemplar_Value() {}
-func (*Exemplar_AsInt) isExemplar_Value() {}
-
-func (m *Exemplar) GetValue() isExemplar_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Exemplar) GetFilteredAttributes() []v11.KeyValue {
- if m != nil {
- return m.FilteredAttributes
- }
- return nil
-}
-
-func (m *Exemplar) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *Exemplar) GetAsDouble() float64 {
- if x, ok := m.GetValue().(*Exemplar_AsDouble); ok {
- return x.AsDouble
- }
- return 0
-}
-
-func (m *Exemplar) GetAsInt() int64 {
- if x, ok := m.GetValue().(*Exemplar_AsInt); ok {
- return x.AsInt
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Exemplar) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Exemplar_AsDouble)(nil),
- (*Exemplar_AsInt)(nil),
- }
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value)
- proto.RegisterEnum("opentelemetry.proto.metrics.v1.DataPointFlags", DataPointFlags_name, DataPointFlags_value)
- proto.RegisterType((*MetricsData)(nil), "opentelemetry.proto.metrics.v1.MetricsData")
- proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics")
- proto.RegisterType((*ScopeMetrics)(nil), "opentelemetry.proto.metrics.v1.ScopeMetrics")
- proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric")
- proto.RegisterType((*Gauge)(nil), "opentelemetry.proto.metrics.v1.Gauge")
- proto.RegisterType((*Sum)(nil), "opentelemetry.proto.metrics.v1.Sum")
- proto.RegisterType((*Histogram)(nil), "opentelemetry.proto.metrics.v1.Histogram")
- proto.RegisterType((*ExponentialHistogram)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogram")
- proto.RegisterType((*Summary)(nil), "opentelemetry.proto.metrics.v1.Summary")
- proto.RegisterType((*NumberDataPoint)(nil), "opentelemetry.proto.metrics.v1.NumberDataPoint")
- proto.RegisterType((*HistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.HistogramDataPoint")
- proto.RegisterType((*ExponentialHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint")
- proto.RegisterType((*ExponentialHistogramDataPoint_Buckets)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets")
- proto.RegisterType((*SummaryDataPoint)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint")
- proto.RegisterType((*SummaryDataPoint_ValueAtQuantile)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile")
- proto.RegisterType((*Exemplar)(nil), "opentelemetry.proto.metrics.v1.Exemplar")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917)
-}
-
-var fileDescriptor_3c3112f9fa006917 = []byte{
- // 1568 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x4f, 0x1b, 0x49,
- 0x16, 0x77, 0xfb, 0xdb, 0xcf, 0x06, 0x9c, 0x5a, 0x96, 0xb4, 0x58, 0xe1, 0x38, 0xce, 0x26, 0xb0,
- 0xd9, 0xc8, 0x5e, 0xc8, 0x6a, 0x3f, 0x0e, 0x91, 0x62, 0x63, 0x03, 0x26, 0x80, 0x49, 0xd9, 0x20,
- 0x25, 0x8a, 0xd2, 0x2a, 0xec, 0xc2, 0xb4, 0xd2, 0xdd, 0xe5, 0xed, 0xae, 0x46, 0xb0, 0xff, 0xc1,
- 0x4a, 0x7b, 0xc8, 0xdf, 0xb1, 0xca, 0x6d, 0x4f, 0x73, 0x9b, 0x63, 0x8e, 0x99, 0xdb, 0x68, 0x34,
- 0x8a, 0x46, 0xe4, 0x30, 0x23, 0xcd, 0x3f, 0x31, 0xaa, 0xea, 0x6e, 0xfc, 0x81, 0x89, 0xc9, 0xc7,
- 0x21, 0x39, 0xb9, 0xea, 0xd5, 0x7b, 0xbf, 0x7a, 0xaf, 0xde, 0xef, 0xd5, 0x2b, 0x37, 0xdc, 0x63,
- 0x3d, 0x6a, 0x71, 0x6a, 0x50, 0x93, 0x72, 0xfb, 0xb4, 0xd4, 0xb3, 0x19, 0x67, 0x25, 0x31, 0xd6,
- 0xdb, 0x4e, 0xe9, 0x78, 0x39, 0x18, 0x16, 0xe5, 0x02, 0xca, 0x0d, 0x69, 0x7b, 0xc2, 0x62, 0xa0,
- 0x72, 0xbc, 0x3c, 0x3f, 0xdb, 0x65, 0x5d, 0xe6, 0x61, 0x88, 0x91, 0xa7, 0x30, 0x7f, 0x77, 0xdc,
- 0x1e, 0x6d, 0x66, 0x9a, 0xcc, 0x12, 0x5b, 0x78, 0x23, 0x5f, 0xb7, 0x38, 0x4e, 0xd7, 0xa6, 0x0e,
- 0x73, 0xed, 0x36, 0x15, 0xda, 0xc1, 0xd8, 0xd3, 0x2f, 0xe8, 0x90, 0xde, 0xf6, 0xf6, 0xaf, 0x12,
- 0x4e, 0xd0, 0x53, 0xc8, 0x06, 0x0a, 0x9a, 0xef, 0x97, 0xaa, 0xe4, 0x23, 0x4b, 0xe9, 0x95, 0x52,
- 0xf1, 0xfd, 0xbe, 0x17, 0xb1, 0x6f, 0xe7, 0xc3, 0xe1, 0x19, 0x7b, 0x58, 0x50, 0xf8, 0x26, 0x0c,
- 0x33, 0x23, 0x4a, 0xa8, 0x0b, 0x6a, 0x87, 0xf6, 0x6c, 0xda, 0x26, 0x9c, 0x76, 0x34, 0xa7, 0xcd,
- 0x7a, 0xfd, 0x7d, 0x7f, 0x49, 0xc8, 0x8d, 0xef, 0x4d, 0xda, 0xb8, 0x29, 0xac, 0x82, 0x5d, 0xe7,
- 0xfa, 0x70, 0x83, 0x72, 0xf4, 0x08, 0x92, 0x81, 0x3f, 0xaa, 0x92, 0x57, 0x96, 0xd2, 0x2b, 0x7f,
- 0x1a, 0x8b, 0x7b, 0x7e, 0x3c, 0x03, 0x11, 0x55, 0xa2, 0xaf, 0xdf, 0xde, 0x08, 0xe1, 0x73, 0x00,
- 0xf4, 0x18, 0xa6, 0x86, 0x5d, 0x0d, 0x7f, 0x84, 0xa7, 0x19, 0x67, 0xd0, 0xbf, 0x05, 0x00, 0xa7,
- 0x7d, 0x44, 0x4d, 0xa2, 0xb9, 0xb6, 0xa1, 0x46, 0xf2, 0xca, 0x52, 0x0a, 0xa7, 0x3c, 0xc9, 0x9e,
- 0x6d, 0x14, 0xbe, 0x55, 0x20, 0x33, 0x14, 0x4f, 0x03, 0x62, 0xd2, 0xde, 0x0f, 0xe6, 0xfe, 0xd8,
- 0xad, 0x7d, 0x66, 0x1c, 0x2f, 0x17, 0xeb, 0x96, 0xc3, 0x6d, 0xd7, 0xa4, 0x16, 0x27, 0x5c, 0x67,
- 0x96, 0x84, 0xf2, 0xc3, 0xf2, 0x70, 0xd0, 0x43, 0x48, 0x0c, 0x47, 0x73, 0x67, 0x52, 0x34, 0x9e,
- 0x2b, 0x38, 0x30, 0x9b, 0x14, 0xc2, 0xab, 0x28, 0xc4, 0x3d, 0x13, 0x84, 0x20, 0x6a, 0x11, 0xd3,
- 0xf3, 0x3d, 0x85, 0xe5, 0x18, 0xe5, 0x21, 0xdd, 0xa1, 0x4e, 0xdb, 0xd6, 0x7b, 0xc2, 0x41, 0x35,
- 0x2c, 0x97, 0x06, 0x45, 0xc2, 0xca, 0xb5, 0x74, 0xee, 0x23, 0xcb, 0x31, 0x7a, 0x00, 0xb1, 0x2e,
- 0x71, 0xbb, 0x54, 0x8d, 0xc9, 0x63, 0xb8, 0x3d, 0xc9, 0xe7, 0x75, 0xa1, 0xbc, 0x11, 0xc2, 0x9e,
- 0x15, 0xfa, 0x3b, 0x44, 0x1c, 0xd7, 0x54, 0x13, 0xd2, 0xf8, 0xd6, 0xc4, 0xf4, 0xb9, 0xe6, 0x46,
- 0x08, 0x0b, 0x0b, 0x54, 0x87, 0xd4, 0x91, 0xee, 0x70, 0xd6, 0xb5, 0x89, 0xa9, 0xa6, 0xde, 0xc3,
- 0xa7, 0x01, 0xf3, 0x8d, 0xc0, 0x60, 0x23, 0x84, 0xfb, 0xd6, 0xe8, 0x05, 0xfc, 0x9e, 0x9e, 0xf4,
- 0x98, 0x45, 0x2d, 0xae, 0x13, 0x43, 0xeb, 0xc3, 0x82, 0x84, 0xfd, 0xeb, 0x24, 0xd8, 0x5a, 0xdf,
- 0x78, 0x70, 0x87, 0x59, 0x3a, 0x46, 0x8e, 0x56, 0x21, 0xe1, 0xb8, 0xa6, 0x49, 0xec, 0x53, 0x35,
- 0x2d, 0xe1, 0x17, 0xaf, 0x10, 0xb4, 0x50, 0xdf, 0x08, 0xe1, 0xc0, 0x12, 0xd5, 0x21, 0x69, 0x52,
- 0x4e, 0x3a, 0x84, 0x13, 0x35, 0x23, 0xb9, 0xb2, 0x38, 0x81, 0x7e, 0x8f, 0xe8, 0xe9, 0x3e, 0x31,
- 0xdc, 0xf3, 0x4a, 0x0a, 0xcc, 0x2b, 0x71, 0x88, 0x8a, 0xdf, 0xcd, 0x68, 0x32, 0x9a, 0x8d, 0x6d,
- 0x46, 0x93, 0xf1, 0x6c, 0x62, 0x33, 0x9a, 0x4c, 0x66, 0x53, 0x85, 0x27, 0x10, 0x93, 0xc9, 0x42,
- 0xbb, 0x90, 0x16, 0x2a, 0x5a, 0x8f, 0xe9, 0x16, 0xbf, 0xf2, 0x6d, 0xb4, 0xe3, 0x9a, 0x07, 0xd4,
- 0x16, 0x77, 0xda, 0xae, 0xb0, 0xc3, 0xd0, 0x09, 0x86, 0x4e, 0xe1, 0x57, 0x05, 0x22, 0x4d, 0xd7,
- 0xfc, 0xfc, 0xc8, 0x88, 0xc1, 0x75, 0xd2, 0xed, 0xda, 0xb4, 0x2b, 0xab, 0x4c, 0xe3, 0xd4, 0xec,
- 0x31, 0x9b, 0x18, 0x3a, 0x3f, 0x95, 0x84, 0x9e, 0x5e, 0xf9, 0xdb, 0x24, 0xf4, 0x72, 0xdf, 0xbc,
- 0xd5, 0xb7, 0xc6, 0x73, 0x64, 0xac, 0x1c, 0xdd, 0x84, 0x8c, 0xee, 0x68, 0x26, 0xb3, 0x18, 0x67,
- 0x96, 0xde, 0x96, 0xb5, 0x91, 0xc4, 0x69, 0xdd, 0xd9, 0x0e, 0x44, 0x85, 0xef, 0x14, 0x48, 0xf5,
- 0x09, 0xd0, 0x1c, 0x17, 0xf3, 0xca, 0x95, 0xa9, 0xfb, 0x65, 0x84, 0x5d, 0xf8, 0x59, 0x81, 0xd9,
- 0x71, 0xbc, 0x47, 0xcf, 0xc7, 0x85, 0xf7, 0xe0, 0x63, 0x4a, 0xe8, 0x0b, 0x89, 0xf4, 0x19, 0x24,
- 0xfc, 0x0a, 0x44, 0x8f, 0xc7, 0xc5, 0xf6, 0x97, 0x2b, 0xd6, 0xef, 0xf8, 0x4a, 0x38, 0x0b, 0xc3,
- 0xcc, 0x08, 0x9f, 0xd1, 0x36, 0x00, 0xe1, 0xdc, 0xd6, 0x0f, 0x5c, 0x4e, 0x1d, 0x35, 0xf1, 0x31,
- 0xf5, 0x3d, 0x00, 0x80, 0x4a, 0x30, 0xeb, 0x70, 0x62, 0x73, 0x8d, 0xeb, 0x26, 0xd5, 0x5c, 0x4b,
- 0x3f, 0xd1, 0x2c, 0x62, 0x31, 0x79, 0x5c, 0x71, 0x7c, 0x4d, 0xae, 0xb5, 0x74, 0x93, 0xee, 0x59,
- 0xfa, 0xc9, 0x0e, 0xb1, 0x18, 0xfa, 0x23, 0x4c, 0x8f, 0xa8, 0x46, 0xa4, 0x6a, 0x86, 0x0f, 0x6a,
- 0x2d, 0x40, 0x8a, 0x38, 0x5a, 0x87, 0xb9, 0x07, 0x06, 0x55, 0xa3, 0x79, 0x65, 0x49, 0xd9, 0x08,
- 0xe1, 0x24, 0x71, 0xaa, 0x52, 0x82, 0xae, 0x43, 0x9c, 0x38, 0x9a, 0x6e, 0x71, 0x35, 0x9e, 0x57,
- 0x96, 0xb2, 0xe2, 0xc6, 0x27, 0x4e, 0xdd, 0xe2, 0x68, 0x0b, 0x52, 0xf4, 0x84, 0x9a, 0x3d, 0x83,
- 0xd8, 0x8e, 0x1a, 0x93, 0xc1, 0x2d, 0x4d, 0xa6, 0x87, 0x67, 0xe0, 0x47, 0xd7, 0x07, 0x40, 0xb3,
- 0x10, 0x3b, 0x34, 0x48, 0xd7, 0x51, 0x93, 0x79, 0x65, 0x69, 0x0a, 0x7b, 0x93, 0x4a, 0x02, 0x62,
- 0xc7, 0xe2, 0x34, 0x36, 0xa3, 0x49, 0x25, 0x1b, 0x2e, 0xfc, 0x18, 0x01, 0x74, 0x91, 0x56, 0x23,
- 0xe7, 0x9c, 0xfa, 0x42, 0xcf, 0x79, 0x16, 0x62, 0x6d, 0xe6, 0x5a, 0x5c, 0x9e, 0x71, 0x1c, 0x7b,
- 0x13, 0x84, 0xbc, 0xbe, 0x19, 0xf3, 0xcf, 0x5d, 0xb6, 0xc4, 0x5b, 0x30, 0x75, 0xe0, 0xb6, 0x5f,
- 0x50, 0xae, 0x49, 0x1d, 0x47, 0x8d, 0xe7, 0x23, 0x02, 0xce, 0x13, 0xae, 0x4a, 0x19, 0x5a, 0x84,
- 0x19, 0x7a, 0xd2, 0x33, 0xf4, 0xb6, 0xce, 0xb5, 0x03, 0xe6, 0x5a, 0x1d, 0x8f, 0x61, 0x0a, 0x9e,
- 0x0e, 0xc4, 0x15, 0x29, 0x1d, 0xce, 0x53, 0xf2, 0xb3, 0xe5, 0x09, 0x06, 0xf2, 0x24, 0xa2, 0x30,
- 0x75, 0x4b, 0x36, 0x42, 0x65, 0x43, 0xc1, 0x62, 0x22, 0x65, 0xe4, 0x44, 0xcd, 0x48, 0x59, 0x18,
- 0x8b, 0x89, 0x68, 0x52, 0x8e, 0x6b, 0x6a, 0xe2, 0xd7, 0xd4, 0x2d, 0xef, 0x97, 0x9c, 0x68, 0x7e,
- 0x7a, 0xff, 0x13, 0x87, 0x85, 0xf7, 0x5e, 0x20, 0x23, 0x99, 0x56, 0xbe, 0xfa, 0x4c, 0xcf, 0x8a,
- 0xb7, 0x27, 0x31, 0xa8, 0xac, 0xad, 0x6b, 0xd8, 0x9b, 0x88, 0xe7, 0xdf, 0xbf, 0xa9, 0xcd, 0xbc,
- 0xec, 0xcb, 0x27, 0x55, 0x1c, 0xa7, 0x84, 0x44, 0xa6, 0x1e, 0x75, 0x21, 0xd9, 0x63, 0x8e, 0xce,
- 0xf5, 0x63, 0x2a, 0xab, 0x25, 0xbd, 0x52, 0xfb, 0xa4, 0x6b, 0xb9, 0x58, 0x91, 0xbc, 0x72, 0x82,
- 0x27, 0x45, 0x00, 0x2e, 0x36, 0xb2, 0xe4, 0x45, 0x7a, 0x4c, 0xfd, 0x97, 0xd9, 0xe7, 0xdd, 0x28,
- 0x00, 0xbf, 0x84, 0x54, 0x43, 0xc4, 0x4d, 0x7f, 0x2a, 0x71, 0x7d, 0x8a, 0x66, 0xc6, 0x50, 0x74,
- 0x6a, 0x80, 0xa2, 0xe8, 0x36, 0x4c, 0xcb, 0xc3, 0xe7, 0x47, 0x36, 0x75, 0x8e, 0x98, 0xd1, 0x51,
- 0xa7, 0xc5, 0x32, 0x9e, 0x12, 0xd2, 0x56, 0x20, 0x9c, 0x5f, 0x83, 0x84, 0x1f, 0x0d, 0x9a, 0x83,
- 0x38, 0x3b, 0x3c, 0x74, 0x28, 0x97, 0xaf, 0xf0, 0x6b, 0xd8, 0x9f, 0x5d, 0x2c, 0x63, 0xf1, 0x6f,
- 0x20, 0x3a, 0x5c, 0xc6, 0x97, 0x55, 0x44, 0xe1, 0x55, 0x04, 0xb2, 0xa3, 0x0d, 0xe7, 0x2b, 0x69,
- 0x28, 0xe3, 0xe9, 0x9f, 0x1d, 0xa0, 0xbf, 0x47, 0x7e, 0x1d, 0x66, 0xfe, 0xe5, 0x12, 0x8b, 0xeb,
- 0x06, 0xd5, 0xe4, 0x2d, 0xef, 0x5d, 0x74, 0xe9, 0x95, 0x87, 0x1f, 0xda, 0x89, 0x8b, 0x32, 0xc2,
- 0x32, 0x7f, 0xec, 0xc3, 0xe1, 0xe9, 0x00, 0x58, 0x2e, 0x5c, 0xd2, 0x5d, 0xe6, 0x57, 0x61, 0x66,
- 0xc4, 0x10, 0xcd, 0x43, 0x32, 0x30, 0x95, 0xd9, 0x54, 0xf0, 0xf9, 0x5c, 0x80, 0x48, 0x37, 0xe5,
- 0xf9, 0x28, 0x78, 0xa8, 0x33, 0xbd, 0x8c, 0x40, 0x32, 0xe0, 0x1e, 0x7a, 0x0e, 0xbf, 0x3b, 0xd4,
- 0x0d, 0x4e, 0x6d, 0xda, 0xd1, 0x3e, 0x35, 0x5f, 0x28, 0x40, 0x2a, 0xf7, 0xf3, 0x76, 0x31, 0x0d,
- 0xe1, 0x49, 0x7d, 0x3d, 0x72, 0xf5, 0xbe, 0xfe, 0x04, 0x12, 0x4e, 0x8f, 0x58, 0x9a, 0xde, 0x91,
- 0x09, 0xcc, 0x54, 0x1e, 0x0a, 0x47, 0x7e, 0x78, 0x7b, 0xe3, 0x1f, 0x5d, 0x36, 0xe2, 0xbb, 0xce,
- 0x4a, 0x6d, 0x66, 0x18, 0xb4, 0xcd, 0x99, 0x5d, 0xea, 0x89, 0xd7, 0x50, 0x49, 0xb7, 0x38, 0xb5,
- 0x2d, 0x62, 0x94, 0xc4, 0xac, 0xd8, 0xec, 0x11, 0xab, 0x5e, 0xc5, 0x71, 0x01, 0x58, 0xef, 0xa0,
- 0x67, 0x90, 0xe4, 0x36, 0x69, 0x53, 0x81, 0x1d, 0x93, 0xd8, 0x65, 0x1f, 0xfb, 0x9f, 0x1f, 0x8e,
- 0xdd, 0x12, 0x48, 0xf5, 0x2a, 0x4e, 0x48, 0xc8, 0x7a, 0x67, 0xe4, 0xb1, 0x70, 0xf7, 0xbf, 0x0a,
- 0xcc, 0x8d, 0x7f, 0x22, 0xa2, 0x45, 0xb8, 0x55, 0x5e, 0x5f, 0xc7, 0xb5, 0xf5, 0x72, 0xab, 0xde,
- 0xd8, 0xd1, 0x5a, 0xb5, 0xed, 0xdd, 0x06, 0x2e, 0x6f, 0xd5, 0x5b, 0x4f, 0xb4, 0xbd, 0x9d, 0xe6,
- 0x6e, 0x6d, 0xb5, 0xbe, 0x56, 0xaf, 0x55, 0xb3, 0x21, 0x74, 0x13, 0x16, 0x2e, 0x53, 0xac, 0xd6,
- 0xb6, 0x5a, 0xe5, 0xac, 0x82, 0xee, 0x40, 0xe1, 0x32, 0x95, 0xd5, 0xbd, 0xed, 0xbd, 0xad, 0x72,
- 0xab, 0xbe, 0x5f, 0xcb, 0x86, 0xef, 0x3e, 0x87, 0xe9, 0x73, 0xbe, 0xae, 0xc9, 0xfb, 0xed, 0x06,
- 0xfc, 0xa1, 0x5a, 0x6e, 0x95, 0xb5, 0xdd, 0x46, 0x7d, 0xa7, 0xa5, 0xad, 0x6d, 0x95, 0xd7, 0x9b,
- 0x5a, 0xb5, 0xa1, 0xed, 0x34, 0x5a, 0xda, 0x5e, 0xb3, 0x96, 0x0d, 0xa1, 0x3f, 0xc3, 0xe2, 0x05,
- 0x85, 0x9d, 0x86, 0x86, 0x6b, 0xab, 0x0d, 0x5c, 0xad, 0x55, 0xb5, 0xfd, 0xf2, 0xd6, 0x5e, 0x4d,
- 0xdb, 0x2e, 0x37, 0x1f, 0x65, 0x95, 0xca, 0xff, 0x95, 0xd7, 0x67, 0x39, 0xe5, 0xcd, 0x59, 0x4e,
- 0xf9, 0xe9, 0x2c, 0xa7, 0xbc, 0x7c, 0x97, 0x0b, 0xbd, 0x79, 0x97, 0x0b, 0x7d, 0xff, 0x2e, 0x17,
- 0x82, 0x9b, 0x3a, 0x9b, 0x50, 0x51, 0x95, 0x8c, 0xff, 0x35, 0x64, 0x57, 0x2c, 0xec, 0x2a, 0x4f,
- 0x6b, 0x1f, 0x9c, 0x0f, 0xef, 0x03, 0x59, 0x97, 0x5a, 0x03, 0xdf, 0xec, 0xfe, 0x17, 0xce, 0x35,
- 0x7a, 0xd4, 0x6a, 0x9d, 0x83, 0x48, 0x78, 0xff, 0x73, 0x87, 0x53, 0xdc, 0x5f, 0x3e, 0x88, 0x4b,
- 0xab, 0xfb, 0xbf, 0x05, 0x00, 0x00, 0xff, 0xff, 0x00, 0xa3, 0x78, 0x2c, 0xfd, 0x13, 0x00, 0x00,
-}
-
-func (m *MetricsData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MetricsData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MetricsData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceMetrics) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceMetrics) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DeprecatedScopeMetrics) > 0 {
- for iNdEx := len(m.DeprecatedScopeMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DeprecatedScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeMetrics) > 0 {
- for iNdEx := len(m.ScopeMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ScopeMetrics) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeMetrics) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Metrics) > 0 {
- for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Metric) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Metadata) > 0 {
- for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x62
- }
- }
- if m.Data != nil {
- {
- size := m.Data.Size()
- i -= size
- if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Unit) > 0 {
- i -= len(m.Unit)
- copy(dAtA[i:], m.Unit)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Description) > 0 {
- i -= len(m.Description)
- copy(dAtA[i:], m.Description)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Description)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Metric_Gauge) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Gauge != nil {
- {
- size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Sum != nil {
- {
- size, err := m.Sum.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_Histogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Histogram != nil {
- {
- size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ExponentialHistogram != nil {
- {
- size, err := m.ExponentialHistogram.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_Summary) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Summary != nil {
- {
- size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- return len(dAtA) - i, nil
-}
-func (m *Gauge) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Gauge) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Sum) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.IsMonotonic {
- i--
- if m.IsMonotonic {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if m.AggregationTemporality != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x10
- }
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Histogram) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.AggregationTemporality != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x10
- }
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExponentialHistogram) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.AggregationTemporality != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x10
- }
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Summary) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Summary) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *NumberDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *NumberDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NumberDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x40
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if m.Value != nil {
- {
- size := m.Value.Size()
- i -= size
- if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Exemplars) > 0 {
- for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *NumberDataPoint_AsDouble) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NumberDataPoint_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
- i--
- dAtA[i] = 0x21
- return len(dAtA) - i, nil
-}
-func (m *NumberDataPoint_AsInt) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NumberDataPoint_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
- i--
- dAtA[i] = 0x31
- return len(dAtA) - i, nil
-}
-func (m *HistogramDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HistogramDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Max_ != nil {
- {
- size := m.Max_.Size()
- i -= size
- if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Min_ != nil {
- {
- size := m.Min_.Size()
- i -= size
- if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x50
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- }
- if len(m.Exemplars) > 0 {
- for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- }
- if len(m.ExplicitBounds) > 0 {
- for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- {
- f8 := math.Float64bits(float64(m.ExplicitBounds[iNdEx]))
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f8))
- }
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8))
- i--
- dAtA[i] = 0x3a
- }
- if len(m.BucketCounts) > 0 {
- for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx]))
- }
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8))
- i--
- dAtA[i] = 0x32
- }
- if m.Sum_ != nil {
- {
- size := m.Sum_.Size()
- i -= size
- if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Count != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
- i--
- dAtA[i] = 0x21
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
- i--
- dAtA[i] = 0x29
- return len(dAtA) - i, nil
-}
-func (m *HistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min))))
- i--
- dAtA[i] = 0x59
- return len(dAtA) - i, nil
-}
-func (m *HistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max))))
- i--
- dAtA[i] = 0x61
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExponentialHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.ZeroThreshold != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
- i--
- dAtA[i] = 0x71
- }
- if m.Max_ != nil {
- {
- size := m.Max_.Size()
- i -= size
- if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Min_ != nil {
- {
- size := m.Min_.Size()
- i -= size
- if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Exemplars) > 0 {
- for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- }
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x50
- }
- {
- size, err := m.Negative.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- {
- size, err := m.Positive.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- if m.ZeroCount != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ZeroCount))
- i--
- dAtA[i] = 0x39
- }
- if m.Scale != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Scale)<<1)^uint32((m.Scale>>31))))
- i--
- dAtA[i] = 0x30
- }
- if m.Sum_ != nil {
- {
- size := m.Sum_.Size()
- i -= size
- if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Count != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
- i--
- dAtA[i] = 0x21
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExponentialHistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
- i--
- dAtA[i] = 0x29
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min))))
- i--
- dAtA[i] = 0x61
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max))))
- i--
- dAtA[i] = 0x69
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint_Buckets) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.BucketCounts) > 0 {
- dAtA12 := make([]byte, len(m.BucketCounts)*10)
- var j11 int
- for _, num := range m.BucketCounts {
- for num >= 1<<7 {
- dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j11++
- }
- dAtA12[j11] = uint8(num)
- j11++
- }
- i -= j11
- copy(dAtA[i:], dAtA12[:j11])
- i = encodeVarintMetrics(dAtA, i, uint64(j11))
- i--
- dAtA[i] = 0x12
- }
- if m.Offset != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SummaryDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SummaryDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SummaryDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x40
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if len(m.QuantileValues) > 0 {
- for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.QuantileValues[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- if m.Sum != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
- i--
- dAtA[i] = 0x29
- }
- if m.Count != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
- i--
- dAtA[i] = 0x21
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Value != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
- i--
- dAtA[i] = 0x11
- }
- if m.Quantile != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile))))
- i--
- dAtA[i] = 0x9
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Exemplar) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.FilteredAttributes) > 0 {
- for iNdEx := len(m.FilteredAttributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.FilteredAttributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if m.Value != nil {
- {
- size := m.Value.Size()
- i -= size
- if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Exemplar_AsDouble) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Exemplar_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
- i--
- dAtA[i] = 0x19
- return len(dAtA) - i, nil
-}
-func (m *Exemplar_AsInt) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Exemplar_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
- i--
- dAtA[i] = 0x31
- return len(dAtA) - i, nil
-}
-func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
- offset -= sovMetrics(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *MetricsData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for _, e := range m.ResourceMetrics {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceMetrics) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if len(m.ScopeMetrics) > 0 {
- for _, e := range m.ScopeMetrics {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if len(m.DeprecatedScopeMetrics) > 0 {
- for _, e := range m.DeprecatedScopeMetrics {
- l = e.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *ScopeMetrics) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if len(m.Metrics) > 0 {
- for _, e := range m.Metrics {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-
-func (m *Metric) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- l = len(m.Description)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- l = len(m.Unit)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Data != nil {
- n += m.Data.Size()
- }
- if len(m.Metadata) > 0 {
- for _, e := range m.Metadata {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *Metric_Gauge) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Gauge != nil {
- l = m.Gauge.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Sum != nil {
- l = m.Sum.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_Histogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Histogram != nil {
- l = m.Histogram.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_ExponentialHistogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ExponentialHistogram != nil {
- l = m.ExponentialHistogram.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_Summary) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Summary != nil {
- l = m.Summary.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Gauge) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovMetrics(uint64(m.AggregationTemporality))
- }
- if m.IsMonotonic {
- n += 2
- }
- return n
-}
-
-func (m *Histogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovMetrics(uint64(m.AggregationTemporality))
- }
- return n
-}
-
-func (m *ExponentialHistogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovMetrics(uint64(m.AggregationTemporality))
- }
- return n
-}
-
-func (m *Summary) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *NumberDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Value != nil {
- n += m.Value.Size()
- }
- if len(m.Exemplars) > 0 {
- for _, e := range m.Exemplars {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- return n
-}
-
-func (m *NumberDataPoint_AsDouble) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *NumberDataPoint_AsInt) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *HistogramDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Count != 0 {
- n += 9
- }
- if m.Sum_ != nil {
- n += m.Sum_.Size()
- }
- if len(m.BucketCounts) > 0 {
- n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8
- }
- if len(m.ExplicitBounds) > 0 {
- n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8
- }
- if len(m.Exemplars) > 0 {
- for _, e := range m.Exemplars {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- if m.Min_ != nil {
- n += m.Min_.Size()
- }
- if m.Max_ != nil {
- n += m.Max_.Size()
- }
- return n
-}
-
-func (m *HistogramDataPoint_Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *HistogramDataPoint_Min) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *HistogramDataPoint_Max) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Count != 0 {
- n += 9
- }
- if m.Sum_ != nil {
- n += m.Sum_.Size()
- }
- if m.Scale != 0 {
- n += 1 + sozMetrics(uint64(m.Scale))
- }
- if m.ZeroCount != 0 {
- n += 9
- }
- l = m.Positive.Size()
- n += 1 + l + sovMetrics(uint64(l))
- l = m.Negative.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- if len(m.Exemplars) > 0 {
- for _, e := range m.Exemplars {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Min_ != nil {
- n += m.Min_.Size()
- }
- if m.Max_ != nil {
- n += m.Max_.Size()
- }
- if m.ZeroThreshold != 0 {
- n += 9
- }
- return n
-}
-
-func (m *ExponentialHistogramDataPoint_Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint_Min) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint_Max) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint_Buckets) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Offset != 0 {
- n += 1 + sozMetrics(uint64(m.Offset))
- }
- if len(m.BucketCounts) > 0 {
- l = 0
- for _, e := range m.BucketCounts {
- l += sovMetrics(uint64(e))
- }
- n += 1 + sovMetrics(uint64(l)) + l
- }
- return n
-}
-
-func (m *SummaryDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Count != 0 {
- n += 9
- }
- if m.Sum != 0 {
- n += 9
- }
- if len(m.QuantileValues) > 0 {
- for _, e := range m.QuantileValues {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- return n
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Quantile != 0 {
- n += 9
- }
- if m.Value != 0 {
- n += 9
- }
- return n
-}
-
-func (m *Exemplar) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Value != nil {
- n += m.Value.Size()
- }
- l = m.SpanId.Size()
- n += 1 + l + sovMetrics(uint64(l))
- l = m.TraceId.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if len(m.FilteredAttributes) > 0 {
- for _, e := range m.FilteredAttributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *Exemplar_AsDouble) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *Exemplar_AsInt) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-
-func sovMetrics(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozMetrics(x uint64) (n int) {
- return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *MetricsData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MetricsData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MetricsData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{})
- if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceMetrics) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeMetrics = append(m.ScopeMetrics, &ScopeMetrics{})
- if err := m.ScopeMetrics[len(m.ScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeprecatedScopeMetrics = append(m.DeprecatedScopeMetrics, &ScopeMetrics{})
- if err := m.DeprecatedScopeMetrics[len(m.DeprecatedScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeMetrics) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeMetrics: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Metrics = append(m.Metrics, &Metric{})
- if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Metric) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Metric: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Description = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Unit = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Gauge{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Gauge{v}
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Sum{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Sum{v}
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Histogram{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Histogram{v}
- iNdEx = postIndex
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &ExponentialHistogram{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_ExponentialHistogram{v}
- iNdEx = postIndex
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Summary{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Summary{v}
- iNdEx = postIndex
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Metadata = append(m.Metadata, v11.KeyValue{})
- if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Gauge) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Gauge: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Sum) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Sum: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Sum: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsMonotonic = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Histogram) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &HistogramDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExponentialHistogram) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExponentialHistogram: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExponentialHistogram: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &ExponentialHistogramDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Summary) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Summary: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &SummaryDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *NumberDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NumberDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NumberDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &NumberDataPoint_AsDouble{float64(math.Float64frombits(v))}
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Exemplars = append(m.Exemplars, Exemplar{})
- if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
- }
- var v int64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &NumberDataPoint_AsInt{v}
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HistogramDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HistogramDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 5:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Sum_ = &HistogramDataPoint_Sum{float64(math.Float64frombits(v))}
- case 6:
- if wireType == 1 {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.BucketCounts = append(m.BucketCounts, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- elementCount = packedLen / 8
- if elementCount != 0 && len(m.BucketCounts) == 0 {
- m.BucketCounts = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.BucketCounts = append(m.BucketCounts, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
- }
- case 7:
- if wireType == 1 {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- v2 := float64(math.Float64frombits(v))
- m.ExplicitBounds = append(m.ExplicitBounds, v2)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- elementCount = packedLen / 8
- if elementCount != 0 && len(m.ExplicitBounds) == 0 {
- m.ExplicitBounds = make([]float64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- v2 := float64(math.Float64frombits(v))
- m.ExplicitBounds = append(m.ExplicitBounds, v2)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
- }
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Exemplars = append(m.Exemplars, Exemplar{})
- if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Min_ = &HistogramDataPoint_Min{float64(math.Float64frombits(v))}
- case 12:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Max_ = &HistogramDataPoint_Max{float64(math.Float64frombits(v))}
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExponentialHistogramDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExponentialHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 5:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Sum_ = &ExponentialHistogramDataPoint_Sum{float64(math.Float64frombits(v))}
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
- m.Scale = v
- case 7:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
- }
- m.ZeroCount = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.ZeroCount = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Positive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Negative.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Exemplars = append(m.Exemplars, Exemplar{})
- if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 12:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Min_ = &ExponentialHistogramDataPoint_Min{float64(math.Float64frombits(v))}
- case 13:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Max_ = &ExponentialHistogramDataPoint_Max{float64(math.Float64frombits(v))}
- case 14:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.ZeroThreshold = float64(math.Float64frombits(v))
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint_Buckets) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Buckets: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Buckets: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
- m.Offset = v
- case 2:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.BucketCounts = append(m.BucketCounts, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.BucketCounts) == 0 {
- m.BucketCounts = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.BucketCounts = append(m.BucketCounts, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SummaryDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SummaryDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 5:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Sum = float64(math.Float64frombits(v))
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.QuantileValues = append(m.QuantileValues, &SummaryDataPoint_ValueAtQuantile{})
- if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SummaryDataPoint_ValueAtQuantile) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Quantile = float64(math.Float64frombits(v))
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = float64(math.Float64frombits(v))
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Exemplar) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &Exemplar_AsDouble{float64(math.Float64frombits(v))}
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
- }
- var v int64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &Exemplar_AsInt{v}
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FilteredAttributes = append(m.FilteredAttributes, v11.KeyValue{})
- if err := m.FilteredAttributes[len(m.FilteredAttributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipMetrics(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupMetrics
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go
deleted file mode 100644
index 23e4ca9fb..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go
+++ /dev/null
@@ -1,5512 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/profiles/v1development/profiles.proto
-
-package v1development
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// Specifies the method of aggregating metric values, either DELTA (change since last report)
-// or CUMULATIVE (total since a fixed start time).
-type AggregationTemporality int32
-
-const (
- // UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
- AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
- //* DELTA is an AggregationTemporality for a profiler which reports
- //changes since last report time. Successive metrics contain aggregation of
- //values from continuous and non-overlapping intervals.
- //
- //The values for a DELTA metric are based only on the time interval
- //associated with one measurement cycle. There is no dependency on
- //previous measurements like is the case for CUMULATIVE metrics.
- //
- //For example, consider a system measuring the number of requests that
- //it receives and reports the sum of these requests every second as a
- //DELTA metric:
- //
- //1. The system starts receiving at time=t_0.
- //2. A request is received, the system measures 1 request.
- //3. A request is received, the system measures 1 request.
- //4. A request is received, the system measures 1 request.
- //5. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0 to
- //t_0+1 with a value of 3.
- //6. A request is received, the system measures 1 request.
- //7. A request is received, the system measures 1 request.
- //8. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0+1 to
- //t_0+2 with a value of 2.
- AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
- //* CUMULATIVE is an AggregationTemporality for a profiler which
- //reports changes since a fixed start time. This means that current values
- //of a CUMULATIVE metric depend on all previous measurements since the
- //start time. Because of this, the sender is required to retain this state
- //in some form. If this state is lost or invalidated, the CUMULATIVE metric
- //values MUST be reset and a new fixed start time following the last
- //reported measurement time sent MUST be used.
- //
- //For example, consider a system measuring the number of requests that
- //it receives and reports the sum of these requests every second as a
- //CUMULATIVE metric:
- //
- //1. The system starts receiving at time=t_0.
- //2. A request is received, the system measures 1 request.
- //3. A request is received, the system measures 1 request.
- //4. A request is received, the system measures 1 request.
- //5. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0 to
- //t_0+1 with a value of 3.
- //6. A request is received, the system measures 1 request.
- //7. A request is received, the system measures 1 request.
- //8. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0 to
- //t_0+2 with a value of 5.
- //9. The system experiences a fault and loses state.
- //10. The system recovers and resumes receiving at time=t_1.
- //11. A request is received, the system measures 1 request.
- //12. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_1 to
- //t_1+1 with a value of 1.
- //
- //Note: Even though, when reporting changes since last report time, using
- //CUMULATIVE is valid, it is not recommended.
- AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
-)
-
-var AggregationTemporality_name = map[int32]string{
- 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
- 1: "AGGREGATION_TEMPORALITY_DELTA",
- 2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
-}
-
-var AggregationTemporality_value = map[string]int32{
- "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
- "AGGREGATION_TEMPORALITY_DELTA": 1,
- "AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
-}
-
-func (x AggregationTemporality) String() string {
- return proto.EnumName(AggregationTemporality_name, int32(x))
-}
-
-func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{0}
-}
-
-// ProfilesDictionary represents the profiles data shared across the
-// entire message being sent.
-type ProfilesDictionary struct {
- // Mappings from address ranges to the image/binary/library mapped
- // into that address range referenced by locations via Location.mapping_index.
- MappingTable []*Mapping `protobuf:"bytes,1,rep,name=mapping_table,json=mappingTable,proto3" json:"mapping_table,omitempty"`
- // Locations referenced by samples via Profile.location_indices.
- LocationTable []*Location `protobuf:"bytes,2,rep,name=location_table,json=locationTable,proto3" json:"location_table,omitempty"`
- // Functions referenced by locations via Line.function_index.
- FunctionTable []*Function `protobuf:"bytes,3,rep,name=function_table,json=functionTable,proto3" json:"function_table,omitempty"`
- // Links referenced by samples via Sample.link_index.
- LinkTable []*Link `protobuf:"bytes,4,rep,name=link_table,json=linkTable,proto3" json:"link_table,omitempty"`
- // A common table for strings referenced by various messages.
- // string_table[0] must always be "".
- StringTable []string `protobuf:"bytes,5,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"`
- // A common table for attributes referenced by various messages.
- AttributeTable []v1.KeyValue `protobuf:"bytes,6,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table"`
- // Represents a mapping between Attribute Keys and Units.
- AttributeUnits []*AttributeUnit `protobuf:"bytes,7,rep,name=attribute_units,json=attributeUnits,proto3" json:"attribute_units,omitempty"`
-}
-
-func (m *ProfilesDictionary) Reset() { *m = ProfilesDictionary{} }
-func (m *ProfilesDictionary) String() string { return proto.CompactTextString(m) }
-func (*ProfilesDictionary) ProtoMessage() {}
-func (*ProfilesDictionary) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{0}
-}
-func (m *ProfilesDictionary) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProfilesDictionary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ProfilesDictionary.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ProfilesDictionary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProfilesDictionary.Merge(m, src)
-}
-func (m *ProfilesDictionary) XXX_Size() int {
- return m.Size()
-}
-func (m *ProfilesDictionary) XXX_DiscardUnknown() {
- xxx_messageInfo_ProfilesDictionary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProfilesDictionary proto.InternalMessageInfo
-
-func (m *ProfilesDictionary) GetMappingTable() []*Mapping {
- if m != nil {
- return m.MappingTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetLocationTable() []*Location {
- if m != nil {
- return m.LocationTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetFunctionTable() []*Function {
- if m != nil {
- return m.FunctionTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetLinkTable() []*Link {
- if m != nil {
- return m.LinkTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetStringTable() []string {
- if m != nil {
- return m.StringTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetAttributeTable() []v1.KeyValue {
- if m != nil {
- return m.AttributeTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetAttributeUnits() []*AttributeUnit {
- if m != nil {
- return m.AttributeUnits
- }
- return nil
-}
-
-// ProfilesData represents the profiles data that can be stored in persistent storage,
-// OR can be embedded by other protocols that transfer OTLP profiles data but do not
-// implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type ProfilesData struct {
- // An array of ResourceProfiles.
- // For data coming from an SDK profiler, this array will typically contain one
- // element. Host-level profilers will usually create one ResourceProfile per
- // container, as well as one additional ResourceProfile grouping all samples
- // from non-containerized processes.
- // Other resource groupings are possible as well and clarified via
- // Resource.attributes and semantic conventions.
- ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"`
- // One instance of ProfilesDictionary
- Dictionary ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"`
-}
-
-func (m *ProfilesData) Reset() { *m = ProfilesData{} }
-func (m *ProfilesData) String() string { return proto.CompactTextString(m) }
-func (*ProfilesData) ProtoMessage() {}
-func (*ProfilesData) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{1}
-}
-func (m *ProfilesData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ProfilesData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProfilesData.Merge(m, src)
-}
-func (m *ProfilesData) XXX_Size() int {
- return m.Size()
-}
-func (m *ProfilesData) XXX_DiscardUnknown() {
- xxx_messageInfo_ProfilesData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProfilesData proto.InternalMessageInfo
-
-func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles {
- if m != nil {
- return m.ResourceProfiles
- }
- return nil
-}
-
-func (m *ProfilesData) GetDictionary() ProfilesDictionary {
- if m != nil {
- return m.Dictionary
- }
- return ProfilesDictionary{}
-}
-
-// A collection of ScopeProfiles from a Resource.
-type ResourceProfiles struct {
- // The resource for the profiles in this message.
- // If this field is not set then no resource info is known.
- Resource v11.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of ScopeProfiles that originate from a resource.
- ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_profiles" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} }
-func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) }
-func (*ResourceProfiles) ProtoMessage() {}
-func (*ResourceProfiles) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{2}
-}
-func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceProfiles) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceProfiles.Merge(m, src)
-}
-func (m *ResourceProfiles) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceProfiles) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceProfiles.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo
-
-func (m *ResourceProfiles) GetResource() v11.Resource {
- if m != nil {
- return m.Resource
- }
- return v11.Resource{}
-}
-
-func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles {
- if m != nil {
- return m.ScopeProfiles
- }
- return nil
-}
-
-func (m *ResourceProfiles) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Profiles produced by an InstrumentationScope.
-type ScopeProfiles struct {
- // The instrumentation scope information for the profiles in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v1.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of Profiles that originate from an instrumentation scope.
- Profiles []*Profile `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the profile data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all profiles in the "profiles" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} }
-func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) }
-func (*ScopeProfiles) ProtoMessage() {}
-func (*ScopeProfiles) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{3}
-}
-func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeProfiles) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeProfiles.Merge(m, src)
-}
-func (m *ScopeProfiles) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeProfiles) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeProfiles.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo
-
-func (m *ScopeProfiles) GetScope() v1.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v1.InstrumentationScope{}
-}
-
-func (m *ScopeProfiles) GetProfiles() []*Profile {
- if m != nil {
- return m.Profiles
- }
- return nil
-}
-
-func (m *ScopeProfiles) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// Represents a complete profile, including sample types, samples,
-// mappings to binaries, locations, functions, string table, and additional metadata.
-// It modifies and annotates pprof Profile with OpenTelemetry specific fields.
-//
-// Note that whilst fields in this message retain the name and field id from pprof in most cases
-// for ease of understanding data migration, it is not intended that pprof:Profile and
-// OpenTelemetry:Profile encoding be wire compatible.
-type Profile struct {
- // A description of the samples associated with each Sample.value.
- // For a cpu profile this might be:
- // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]]
- // For a heap profile, this might be:
- // [["allocations","count"], ["space","bytes"]],
- // If one of the values represents the number of events represented
- // by the sample, by convention it should be at index 0 and use
- // sample_type.unit == "count".
- SampleType []*ValueType `protobuf:"bytes,1,rep,name=sample_type,json=sampleType,proto3" json:"sample_type,omitempty"`
- // The set of samples recorded in this profile.
- Sample []*Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample,omitempty"`
- // References to locations in ProfilesDictionary.location_table.
- LocationIndices []int32 `protobuf:"varint,3,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"`
- // Time of collection (UTC) represented as nanoseconds past the epoch.
- TimeNanos int64 `protobuf:"varint,4,opt,name=time_nanos,json=timeNanos,proto3" json:"time_nanos,omitempty"`
- // Duration of the profile, if a duration makes sense.
- DurationNanos int64 `protobuf:"varint,5,opt,name=duration_nanos,json=durationNanos,proto3" json:"duration_nanos,omitempty"`
- // The kind of events between sampled occurrences.
- // e.g [ "cpu","cycles" ] or [ "heap","bytes" ]
- PeriodType ValueType `protobuf:"bytes,6,opt,name=period_type,json=periodType,proto3" json:"period_type"`
- // The number of events between sampled occurrences.
- Period int64 `protobuf:"varint,7,opt,name=period,proto3" json:"period,omitempty"`
- // Free-form text associated with the profile. The text is displayed as is
- // to the user by the tools that read profiles (e.g. by pprof). This field
- // should not be used to store any machine-readable information, it is only
- // for human-friendly content. The profile must stay functional if this field
- // is cleaned.
- CommentStrindices []int32 `protobuf:"varint,8,rep,packed,name=comment_strindices,json=commentStrindices,proto3" json:"comment_strindices,omitempty"`
- // Index into the sample_type array to the default sample type.
- DefaultSampleTypeIndex int32 `protobuf:"varint,9,opt,name=default_sample_type_index,json=defaultSampleTypeIndex,proto3" json:"default_sample_type_index,omitempty"`
- // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with
- // all zeroes is considered invalid.
- //
- // This field is required.
- ProfileId go_opentelemetry_io_collector_pdata_internal_data.ProfileID `protobuf:"bytes,10,opt,name=profile_id,json=profileId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.ProfileID" json:"profile_id"`
- // dropped_attributes_count is the number of attributes that were discarded. Attributes
- // can be discarded because their keys are too long or because there are too many
- // attributes. If this value is 0, then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,11,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present]
- OriginalPayloadFormat string `protobuf:"bytes,12,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"`
- // Original payload can be stored in this field. This can be useful for users who want to get the original payload.
- // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec.
- // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload.
- // If the original payload is in pprof format, it SHOULD not be included in this field.
- // The field is optional, however if it is present then equivalent converted data should be populated in other fields
- // of this message as far as is practicable.
- OriginalPayload []byte `protobuf:"bytes,13,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"`
- // References to attributes in attribute_table. [optional]
- // It is a collection of key/value pairs. Note, global attributes
- // like server name can be set using the resource API. Examples of attributes:
- //
- // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
- // "/http/server_latency": 300
- // "abc.com/myattribute": true
- // "abc.com/score": 10.239
- //
- // The OpenTelemetry API specification further restricts the allowed value types:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- AttributeIndices []int32 `protobuf:"varint,14,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
-}
-
-func (m *Profile) Reset() { *m = Profile{} }
-func (m *Profile) String() string { return proto.CompactTextString(m) }
-func (*Profile) ProtoMessage() {}
-func (*Profile) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{4}
-}
-func (m *Profile) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Profile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Profile.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Profile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Profile.Merge(m, src)
-}
-func (m *Profile) XXX_Size() int {
- return m.Size()
-}
-func (m *Profile) XXX_DiscardUnknown() {
- xxx_messageInfo_Profile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Profile proto.InternalMessageInfo
-
-func (m *Profile) GetSampleType() []*ValueType {
- if m != nil {
- return m.SampleType
- }
- return nil
-}
-
-func (m *Profile) GetSample() []*Sample {
- if m != nil {
- return m.Sample
- }
- return nil
-}
-
-func (m *Profile) GetLocationIndices() []int32 {
- if m != nil {
- return m.LocationIndices
- }
- return nil
-}
-
-func (m *Profile) GetTimeNanos() int64 {
- if m != nil {
- return m.TimeNanos
- }
- return 0
-}
-
-func (m *Profile) GetDurationNanos() int64 {
- if m != nil {
- return m.DurationNanos
- }
- return 0
-}
-
-func (m *Profile) GetPeriodType() ValueType {
- if m != nil {
- return m.PeriodType
- }
- return ValueType{}
-}
-
-func (m *Profile) GetPeriod() int64 {
- if m != nil {
- return m.Period
- }
- return 0
-}
-
-func (m *Profile) GetCommentStrindices() []int32 {
- if m != nil {
- return m.CommentStrindices
- }
- return nil
-}
-
-func (m *Profile) GetDefaultSampleTypeIndex() int32 {
- if m != nil {
- return m.DefaultSampleTypeIndex
- }
- return 0
-}
-
-func (m *Profile) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Profile) GetOriginalPayloadFormat() string {
- if m != nil {
- return m.OriginalPayloadFormat
- }
- return ""
-}
-
-func (m *Profile) GetOriginalPayload() []byte {
- if m != nil {
- return m.OriginalPayload
- }
- return nil
-}
-
-func (m *Profile) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-// Represents a mapping between Attribute Keys and Units.
-type AttributeUnit struct {
- // Index into string table.
- AttributeKeyStrindex int32 `protobuf:"varint,1,opt,name=attribute_key_strindex,json=attributeKeyStrindex,proto3" json:"attribute_key_strindex,omitempty"`
- // Index into string table.
- UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
-}
-
-func (m *AttributeUnit) Reset() { *m = AttributeUnit{} }
-func (m *AttributeUnit) String() string { return proto.CompactTextString(m) }
-func (*AttributeUnit) ProtoMessage() {}
-func (*AttributeUnit) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{5}
-}
-func (m *AttributeUnit) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AttributeUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AttributeUnit.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AttributeUnit) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AttributeUnit.Merge(m, src)
-}
-func (m *AttributeUnit) XXX_Size() int {
- return m.Size()
-}
-func (m *AttributeUnit) XXX_DiscardUnknown() {
- xxx_messageInfo_AttributeUnit.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AttributeUnit proto.InternalMessageInfo
-
-func (m *AttributeUnit) GetAttributeKeyStrindex() int32 {
- if m != nil {
- return m.AttributeKeyStrindex
- }
- return 0
-}
-
-func (m *AttributeUnit) GetUnitStrindex() int32 {
- if m != nil {
- return m.UnitStrindex
- }
- return 0
-}
-
-// A pointer from a profile Sample to a trace Span.
-// Connects a profile sample to a trace span, identified by unique trace and span IDs.
-type Link struct {
- // A unique identifier of a trace that this linked span is part of. The ID is a
- // 16-byte array.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for the linked span. The ID is an 8-byte array.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
-}
-
-func (m *Link) Reset() { *m = Link{} }
-func (m *Link) String() string { return proto.CompactTextString(m) }
-func (*Link) ProtoMessage() {}
-func (*Link) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{6}
-}
-func (m *Link) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Link.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Link) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Link.Merge(m, src)
-}
-func (m *Link) XXX_Size() int {
- return m.Size()
-}
-func (m *Link) XXX_DiscardUnknown() {
- xxx_messageInfo_Link.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Link proto.InternalMessageInfo
-
-// ValueType describes the type and units of a value, with an optional aggregation temporality.
-type ValueType struct {
- TypeStrindex int32 `protobuf:"varint,1,opt,name=type_strindex,json=typeStrindex,proto3" json:"type_strindex,omitempty"`
- UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
- AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1development.AggregationTemporality" json:"aggregation_temporality,omitempty"`
-}
-
-func (m *ValueType) Reset() { *m = ValueType{} }
-func (m *ValueType) String() string { return proto.CompactTextString(m) }
-func (*ValueType) ProtoMessage() {}
-func (*ValueType) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{7}
-}
-func (m *ValueType) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ValueType.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ValueType) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValueType.Merge(m, src)
-}
-func (m *ValueType) XXX_Size() int {
- return m.Size()
-}
-func (m *ValueType) XXX_DiscardUnknown() {
- xxx_messageInfo_ValueType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValueType proto.InternalMessageInfo
-
-func (m *ValueType) GetTypeStrindex() int32 {
- if m != nil {
- return m.TypeStrindex
- }
- return 0
-}
-
-func (m *ValueType) GetUnitStrindex() int32 {
- if m != nil {
- return m.UnitStrindex
- }
- return 0
-}
-
-func (m *ValueType) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-// Each Sample records values encountered in some program
-// context. The program context is typically a stack trace, perhaps
-// augmented with auxiliary information like the thread-id, some
-// indicator of a higher level request being handled etc.
-type Sample struct {
- // locations_start_index along with locations_length refers to to a slice of locations in Profile.location_indices.
- LocationsStartIndex int32 `protobuf:"varint,1,opt,name=locations_start_index,json=locationsStartIndex,proto3" json:"locations_start_index,omitempty"`
- // locations_length along with locations_start_index refers to a slice of locations in Profile.location_indices.
- // Supersedes location_index.
- LocationsLength int32 `protobuf:"varint,2,opt,name=locations_length,json=locationsLength,proto3" json:"locations_length,omitempty"`
- // The type and unit of each value is defined by the corresponding
- // entry in Profile.sample_type. All samples must have the same
- // number of values, the same as the length of Profile.sample_type.
- // When aggregating multiple samples into a single sample, the
- // result has a list of values that is the element-wise sum of the
- // lists of the originals.
- Value []int64 `protobuf:"varint,3,rep,packed,name=value,proto3" json:"value,omitempty"`
- // References to attributes in ProfilesDictionary.attribute_table. [optional]
- AttributeIndices []int32 `protobuf:"varint,4,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
- // Reference to link in ProfilesDictionary.link_table. [optional]
- //
- // Types that are valid to be assigned to LinkIndex_:
- // *Sample_LinkIndex
- LinkIndex_ isSample_LinkIndex_ `protobuf_oneof:"link_index_"`
- // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected
- // to fall within the Profile's time range. [optional]
- TimestampsUnixNano []uint64 `protobuf:"varint,6,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"`
-}
-
-func (m *Sample) Reset() { *m = Sample{} }
-func (m *Sample) String() string { return proto.CompactTextString(m) }
-func (*Sample) ProtoMessage() {}
-func (*Sample) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{8}
-}
-func (m *Sample) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Sample) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Sample.Merge(m, src)
-}
-func (m *Sample) XXX_Size() int {
- return m.Size()
-}
-func (m *Sample) XXX_DiscardUnknown() {
- xxx_messageInfo_Sample.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Sample proto.InternalMessageInfo
-
-type isSample_LinkIndex_ interface {
- isSample_LinkIndex_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Sample_LinkIndex struct {
- LinkIndex int32 `protobuf:"varint,5,opt,name=link_index,json=linkIndex,proto3,oneof" json:"link_index,omitempty"`
-}
-
-func (*Sample_LinkIndex) isSample_LinkIndex_() {}
-
-func (m *Sample) GetLinkIndex_() isSample_LinkIndex_ {
- if m != nil {
- return m.LinkIndex_
- }
- return nil
-}
-
-func (m *Sample) GetLocationsStartIndex() int32 {
- if m != nil {
- return m.LocationsStartIndex
- }
- return 0
-}
-
-func (m *Sample) GetLocationsLength() int32 {
- if m != nil {
- return m.LocationsLength
- }
- return 0
-}
-
-func (m *Sample) GetValue() []int64 {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Sample) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-func (m *Sample) GetLinkIndex() int32 {
- if x, ok := m.GetLinkIndex_().(*Sample_LinkIndex); ok {
- return x.LinkIndex
- }
- return 0
-}
-
-func (m *Sample) GetTimestampsUnixNano() []uint64 {
- if m != nil {
- return m.TimestampsUnixNano
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Sample) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Sample_LinkIndex)(nil),
- }
-}
-
-// Describes the mapping of a binary in memory, including its address range,
-// file offset, and metadata like build ID
-type Mapping struct {
- // Address at which the binary (or DLL) is loaded into memory.
- MemoryStart uint64 `protobuf:"varint,1,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"`
- // The limit of the address range occupied by this mapping.
- MemoryLimit uint64 `protobuf:"varint,2,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"`
- // Offset in the binary that corresponds to the first mapped address.
- FileOffset uint64 `protobuf:"varint,3,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"`
- // The object this entry is loaded from. This can be a filename on
- // disk for the main binary and shared libraries, or virtual
- // abstractions like "[vdso]".
- FilenameStrindex int32 `protobuf:"varint,4,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"`
- // References to attributes in ProfilesDictionary.attribute_table. [optional]
- AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
- // The following fields indicate the resolution of symbolic info.
- HasFunctions bool `protobuf:"varint,6,opt,name=has_functions,json=hasFunctions,proto3" json:"has_functions,omitempty"`
- HasFilenames bool `protobuf:"varint,7,opt,name=has_filenames,json=hasFilenames,proto3" json:"has_filenames,omitempty"`
- HasLineNumbers bool `protobuf:"varint,8,opt,name=has_line_numbers,json=hasLineNumbers,proto3" json:"has_line_numbers,omitempty"`
- HasInlineFrames bool `protobuf:"varint,9,opt,name=has_inline_frames,json=hasInlineFrames,proto3" json:"has_inline_frames,omitempty"`
-}
-
-func (m *Mapping) Reset() { *m = Mapping{} }
-func (m *Mapping) String() string { return proto.CompactTextString(m) }
-func (*Mapping) ProtoMessage() {}
-func (*Mapping) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{9}
-}
-func (m *Mapping) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Mapping.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Mapping) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Mapping.Merge(m, src)
-}
-func (m *Mapping) XXX_Size() int {
- return m.Size()
-}
-func (m *Mapping) XXX_DiscardUnknown() {
- xxx_messageInfo_Mapping.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Mapping proto.InternalMessageInfo
-
-func (m *Mapping) GetMemoryStart() uint64 {
- if m != nil {
- return m.MemoryStart
- }
- return 0
-}
-
-func (m *Mapping) GetMemoryLimit() uint64 {
- if m != nil {
- return m.MemoryLimit
- }
- return 0
-}
-
-func (m *Mapping) GetFileOffset() uint64 {
- if m != nil {
- return m.FileOffset
- }
- return 0
-}
-
-func (m *Mapping) GetFilenameStrindex() int32 {
- if m != nil {
- return m.FilenameStrindex
- }
- return 0
-}
-
-func (m *Mapping) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-func (m *Mapping) GetHasFunctions() bool {
- if m != nil {
- return m.HasFunctions
- }
- return false
-}
-
-func (m *Mapping) GetHasFilenames() bool {
- if m != nil {
- return m.HasFilenames
- }
- return false
-}
-
-func (m *Mapping) GetHasLineNumbers() bool {
- if m != nil {
- return m.HasLineNumbers
- }
- return false
-}
-
-func (m *Mapping) GetHasInlineFrames() bool {
- if m != nil {
- return m.HasInlineFrames
- }
- return false
-}
-
-// Describes function and line table debug information.
-type Location struct {
- // Reference to mapping in ProfilesDictionary.mapping_table.
- // It can be unset if the mapping is unknown or not applicable for
- // this profile type.
- //
- // Types that are valid to be assigned to MappingIndex_:
- // *Location_MappingIndex
- MappingIndex_ isLocation_MappingIndex_ `protobuf_oneof:"mapping_index_"`
- // The instruction address for this location, if available. It
- // should be within [Mapping.memory_start...Mapping.memory_limit]
- // for the corresponding mapping. A non-leaf address may be in the
- // middle of a call instruction. It is up to display tools to find
- // the beginning of the instruction if necessary.
- Address uint64 `protobuf:"varint,2,opt,name=address,proto3" json:"address,omitempty"`
- // Multiple line indicates this location has inlined functions,
- // where the last entry represents the caller into which the
- // preceding entries were inlined.
- //
- // E.g., if memcpy() is inlined into printf:
- // line[0].function_name == "memcpy"
- // line[1].function_name == "printf"
- Line []*Line `protobuf:"bytes,3,rep,name=line,proto3" json:"line,omitempty"`
- // Provides an indication that multiple symbols map to this location's
- // address, for example due to identical code folding by the linker. In that
- // case the line information above represents one of the multiple
- // symbols. This field must be recomputed when the symbolization state of the
- // profile changes.
- IsFolded bool `protobuf:"varint,4,opt,name=is_folded,json=isFolded,proto3" json:"is_folded,omitempty"`
- // References to attributes in ProfilesDictionary.attribute_table. [optional]
- AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
-}
-
-func (m *Location) Reset() { *m = Location{} }
-func (m *Location) String() string { return proto.CompactTextString(m) }
-func (*Location) ProtoMessage() {}
-func (*Location) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{10}
-}
-func (m *Location) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Location.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Location) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Location.Merge(m, src)
-}
-func (m *Location) XXX_Size() int {
- return m.Size()
-}
-func (m *Location) XXX_DiscardUnknown() {
- xxx_messageInfo_Location.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Location proto.InternalMessageInfo
-
-type isLocation_MappingIndex_ interface {
- isLocation_MappingIndex_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Location_MappingIndex struct {
- MappingIndex int32 `protobuf:"varint,1,opt,name=mapping_index,json=mappingIndex,proto3,oneof" json:"mapping_index,omitempty"`
-}
-
-func (*Location_MappingIndex) isLocation_MappingIndex_() {}
-
-func (m *Location) GetMappingIndex_() isLocation_MappingIndex_ {
- if m != nil {
- return m.MappingIndex_
- }
- return nil
-}
-
-func (m *Location) GetMappingIndex() int32 {
- if x, ok := m.GetMappingIndex_().(*Location_MappingIndex); ok {
- return x.MappingIndex
- }
- return 0
-}
-
-func (m *Location) GetAddress() uint64 {
- if m != nil {
- return m.Address
- }
- return 0
-}
-
-func (m *Location) GetLine() []*Line {
- if m != nil {
- return m.Line
- }
- return nil
-}
-
-func (m *Location) GetIsFolded() bool {
- if m != nil {
- return m.IsFolded
- }
- return false
-}
-
-func (m *Location) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Location) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Location_MappingIndex)(nil),
- }
-}
-
-// Details a specific line in a source code, linked to a function.
-type Line struct {
- // Reference to function in ProfilesDictionary.function_table.
- FunctionIndex int32 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"`
- // Line number in source code. 0 means unset.
- Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"`
- // Column number in source code. 0 means unset.
- Column int64 `protobuf:"varint,3,opt,name=column,proto3" json:"column,omitempty"`
-}
-
-func (m *Line) Reset() { *m = Line{} }
-func (m *Line) String() string { return proto.CompactTextString(m) }
-func (*Line) ProtoMessage() {}
-func (*Line) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{11}
-}
-func (m *Line) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Line) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Line.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Line) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Line.Merge(m, src)
-}
-func (m *Line) XXX_Size() int {
- return m.Size()
-}
-func (m *Line) XXX_DiscardUnknown() {
- xxx_messageInfo_Line.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Line proto.InternalMessageInfo
-
-func (m *Line) GetFunctionIndex() int32 {
- if m != nil {
- return m.FunctionIndex
- }
- return 0
-}
-
-func (m *Line) GetLine() int64 {
- if m != nil {
- return m.Line
- }
- return 0
-}
-
-func (m *Line) GetColumn() int64 {
- if m != nil {
- return m.Column
- }
- return 0
-}
-
-// Describes a function, including its human-readable name, system name,
-// source file, and starting line number in the source.
-type Function struct {
- // Function name. Empty string if not available.
- NameStrindex int32 `protobuf:"varint,1,opt,name=name_strindex,json=nameStrindex,proto3" json:"name_strindex,omitempty"`
- // Function name, as identified by the system. For instance,
- // it can be a C++ mangled name. Empty string if not available.
- SystemNameStrindex int32 `protobuf:"varint,2,opt,name=system_name_strindex,json=systemNameStrindex,proto3" json:"system_name_strindex,omitempty"`
- // Source file containing the function. Empty string if not available.
- FilenameStrindex int32 `protobuf:"varint,3,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"`
- // Line number in source file. 0 means unset.
- StartLine int64 `protobuf:"varint,4,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"`
-}
-
-func (m *Function) Reset() { *m = Function{} }
-func (m *Function) String() string { return proto.CompactTextString(m) }
-func (*Function) ProtoMessage() {}
-func (*Function) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{12}
-}
-func (m *Function) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Function.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Function) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Function.Merge(m, src)
-}
-func (m *Function) XXX_Size() int {
- return m.Size()
-}
-func (m *Function) XXX_DiscardUnknown() {
- xxx_messageInfo_Function.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Function proto.InternalMessageInfo
-
-func (m *Function) GetNameStrindex() int32 {
- if m != nil {
- return m.NameStrindex
- }
- return 0
-}
-
-func (m *Function) GetSystemNameStrindex() int32 {
- if m != nil {
- return m.SystemNameStrindex
- }
- return 0
-}
-
-func (m *Function) GetFilenameStrindex() int32 {
- if m != nil {
- return m.FilenameStrindex
- }
- return 0
-}
-
-func (m *Function) GetStartLine() int64 {
- if m != nil {
- return m.StartLine
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.profiles.v1development.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value)
- proto.RegisterType((*ProfilesDictionary)(nil), "opentelemetry.proto.profiles.v1development.ProfilesDictionary")
- proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1development.ProfilesData")
- proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1development.ResourceProfiles")
- proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1development.ScopeProfiles")
- proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1development.Profile")
- proto.RegisterType((*AttributeUnit)(nil), "opentelemetry.proto.profiles.v1development.AttributeUnit")
- proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1development.Link")
- proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1development.ValueType")
- proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1development.Sample")
- proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1development.Mapping")
- proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1development.Location")
- proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1development.Line")
- proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1development.Function")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/profiles/v1development/profiles.proto", fileDescriptor_ddd0cf081a2fe76f)
-}
-
-var fileDescriptor_ddd0cf081a2fe76f = []byte{
- // 1617 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0x5b, 0x4f, 0x1c, 0xc9,
- 0x15, 0xa6, 0xe7, 0x3e, 0x67, 0x2e, 0x0c, 0x15, 0x96, 0x9d, 0x6c, 0xb4, 0x30, 0x3b, 0xd6, 0x66,
- 0x67, 0x89, 0x16, 0x0c, 0x6c, 0xa2, 0x45, 0x89, 0xa2, 0x00, 0x03, 0xde, 0xb1, 0x31, 0x90, 0x62,
- 0x40, 0x71, 0x62, 0xa9, 0x53, 0x4c, 0xd7, 0x0c, 0x1d, 0xf7, 0x4d, 0x5d, 0x35, 0x88, 0x51, 0xfe,
- 0x82, 0x1f, 0xf2, 0x3b, 0xa2, 0xe4, 0x37, 0xe4, 0xd5, 0x8f, 0x56, 0x9e, 0xac, 0x3c, 0x38, 0x91,
- 0xfd, 0xe2, 0x44, 0xca, 0x7f, 0x88, 0xea, 0xd2, 0x3d, 0x17, 0x0f, 0x72, 0xda, 0x2f, 0x68, 0xea,
- 0x9c, 0xaf, 0xbe, 0x53, 0xe7, 0x52, 0xa7, 0x4e, 0x03, 0xbb, 0x7e, 0x40, 0x3d, 0x4e, 0x1d, 0xea,
- 0x52, 0x1e, 0x8e, 0x36, 0x83, 0xd0, 0xe7, 0xbe, 0xf8, 0xdb, 0xb7, 0x1d, 0xca, 0x36, 0x6f, 0xb6,
- 0x2c, 0x7a, 0x43, 0x1d, 0x3f, 0x70, 0xa9, 0xc7, 0x63, 0xf1, 0x86, 0x44, 0xa1, 0xf5, 0xa9, 0xad,
- 0x4a, 0xb8, 0x11, 0x63, 0xa6, 0xb6, 0x7e, 0xb6, 0x3c, 0xf0, 0x07, 0xbe, 0x22, 0x17, 0xbf, 0x14,
- 0xf8, 0xb3, 0xf5, 0x79, 0xc6, 0x7b, 0xbe, 0xeb, 0xfa, 0xde, 0xe6, 0xcd, 0x96, 0xfe, 0xa5, 0xb1,
- 0x1b, 0xf3, 0xb0, 0x21, 0x65, 0xfe, 0x30, 0xec, 0x51, 0x81, 0x8e, 0x7e, 0x2b, 0x7c, 0xf3, 0x55,
- 0x06, 0xd0, 0x99, 0x3e, 0x4c, 0xdb, 0xee, 0x71, 0xdb, 0xf7, 0x48, 0x38, 0x42, 0xbf, 0x81, 0x8a,
- 0x4b, 0x82, 0xc0, 0xf6, 0x06, 0x26, 0x27, 0x57, 0x0e, 0xad, 0x1b, 0x8d, 0x74, 0xab, 0xb4, 0xbd,
- 0xb3, 0xf1, 0xff, 0x3b, 0xb3, 0xf1, 0x58, 0x11, 0xe0, 0xb2, 0x66, 0xea, 0x0a, 0x22, 0xf4, 0x3b,
- 0xa8, 0x3a, 0x7e, 0x8f, 0x08, 0x43, 0x9a, 0x3a, 0x25, 0xa9, 0xbf, 0x4d, 0x42, 0x7d, 0xac, 0x19,
- 0x70, 0x25, 0xe2, 0x8a, 0xc9, 0xfb, 0x43, 0xaf, 0x37, 0x41, 0x9e, 0x4e, 0x4e, 0x7e, 0xa4, 0x19,
- 0x70, 0x25, 0xe2, 0x52, 0xe4, 0xa7, 0x00, 0x8e, 0xed, 0x3d, 0xd3, 0xc4, 0x19, 0x49, 0x7c, 0x3f,
- 0xd1, 0xa9, 0x6d, 0xef, 0x19, 0x2e, 0x0a, 0x0e, 0x45, 0xf8, 0x05, 0x94, 0x19, 0x0f, 0xc7, 0x31,
- 0xce, 0x36, 0xd2, 0xad, 0x22, 0x2e, 0x29, 0x99, 0x82, 0x5c, 0xc2, 0x22, 0xe1, 0x3c, 0xb4, 0xaf,
- 0x86, 0x9c, 0x6a, 0x54, 0x4e, 0x1a, 0xfe, 0x6a, 0xae, 0x61, 0x5d, 0x0a, 0x37, 0x5b, 0x1b, 0x8f,
- 0xe8, 0xe8, 0x92, 0x38, 0x43, 0xba, 0x9f, 0x79, 0xf1, 0x7a, 0x6d, 0x01, 0x57, 0x63, 0x16, 0xc5,
- 0x7b, 0x35, 0xc9, 0x3b, 0xf4, 0x6c, 0xce, 0xea, 0x79, 0xc9, 0xbb, 0x9b, 0xc4, 0xa1, 0xbd, 0x88,
- 0xe2, 0xc2, 0xb3, 0xf9, 0x84, 0x0d, 0xb1, 0x64, 0xcd, 0x7f, 0x1a, 0x50, 0x8e, 0x4b, 0x8b, 0x70,
- 0x82, 0x6c, 0x58, 0x8a, 0xaa, 0xcf, 0x8c, 0x18, 0x75, 0x61, 0xfd, 0x22, 0x89, 0x59, 0xac, 0x49,
- 0x22, 0x72, 0x5c, 0x0b, 0x67, 0x24, 0xc8, 0x02, 0xb0, 0xe2, 0x6a, 0xae, 0xa7, 0x1a, 0x46, 0xab,
- 0xb4, 0xfd, 0xcb, 0x24, 0x36, 0xde, 0xbf, 0x13, 0x3a, 0x92, 0x13, 0xbc, 0xcd, 0x77, 0x06, 0xd4,
- 0x66, 0x0f, 0x83, 0x1e, 0x41, 0x21, 0x3a, 0x4e, 0xdd, 0x90, 0x86, 0xbf, 0x9e, 0x6b, 0x38, 0xbe,
- 0x88, 0x37, 0x5b, 0xb1, 0x47, 0xda, 0x46, 0x4c, 0x80, 0x7e, 0x0f, 0x55, 0xd6, 0xf3, 0x83, 0x89,
- 0x78, 0xa5, 0x92, 0xa7, 0xe9, 0x5c, 0x30, 0xc4, 0xc1, 0xaa, 0xb0, 0xc9, 0x25, 0xfa, 0x1c, 0x80,
- 0xf5, 0xae, 0xa9, 0x4b, 0xcc, 0x61, 0xe8, 0xd4, 0xd3, 0x0d, 0xa3, 0x55, 0xc4, 0x45, 0x25, 0xb9,
- 0x08, 0x9d, 0x87, 0xb9, 0xc2, 0xbb, 0x7c, 0xed, 0xdf, 0xf9, 0xe6, 0x4b, 0x03, 0x2a, 0x53, 0x3c,
- 0xe8, 0x14, 0xb2, 0x92, 0x49, 0x3b, 0xb9, 0xf3, 0x81, 0x82, 0xec, 0x78, 0x8c, 0x87, 0x43, 0x71,
- 0x1e, 0x79, 0x5f, 0x25, 0x97, 0x76, 0x57, 0xf1, 0xa0, 0x53, 0x28, 0xcc, 0x78, 0xb9, 0xf3, 0x11,
- 0x19, 0xc3, 0x31, 0xc9, 0x07, 0x5c, 0x6b, 0xfe, 0x25, 0x07, 0x79, 0xbd, 0x09, 0x5d, 0x42, 0x89,
- 0x11, 0x37, 0x70, 0xa8, 0xc9, 0x47, 0x41, 0xd4, 0xed, 0x7e, 0x9a, 0xc4, 0xbc, 0xbc, 0x6d, 0xdd,
- 0x51, 0x40, 0x31, 0x28, 0x26, 0xf1, 0x1b, 0x3d, 0x84, 0x9c, 0x5a, 0x69, 0x8f, 0xb6, 0x13, 0xe5,
- 0x4d, 0xee, 0xc4, 0x9a, 0x01, 0x7d, 0x0d, 0xb5, 0xb8, 0x73, 0xda, 0x9e, 0x65, 0xf7, 0x28, 0x93,
- 0xed, 0x2d, 0x8b, 0x17, 0x23, 0x79, 0x47, 0x89, 0x85, 0xe7, 0xdc, 0x76, 0xa9, 0xe9, 0x11, 0xcf,
- 0x67, 0xf5, 0x4c, 0xc3, 0x68, 0xa5, 0x71, 0x51, 0x48, 0x4e, 0x84, 0x00, 0x7d, 0x09, 0x55, 0x6b,
- 0x18, 0x2a, 0x26, 0x05, 0xc9, 0x4a, 0x48, 0x25, 0x92, 0x2a, 0xd8, 0x53, 0x28, 0x05, 0x34, 0xb4,
- 0x7d, 0x4b, 0x05, 0x25, 0x27, 0xf3, 0xfc, 0x71, 0x41, 0x89, 0x2e, 0x8f, 0xe2, 0x93, 0xa1, 0x59,
- 0x81, 0x9c, 0x5a, 0xd5, 0xf3, 0xd2, 0xb8, 0x5e, 0xa1, 0x6f, 0x00, 0x89, 0xaa, 0xa1, 0x1e, 0x37,
- 0x65, 0x27, 0x54, 0x8e, 0x16, 0xa4, 0xa3, 0x4b, 0x5a, 0x73, 0x1e, 0x2b, 0xd0, 0x2e, 0xfc, 0xd0,
- 0xa2, 0x7d, 0x32, 0x74, 0xb8, 0x39, 0x91, 0x41, 0x11, 0x20, 0x7a, 0x5b, 0x2f, 0x36, 0x8c, 0x56,
- 0x16, 0xaf, 0x68, 0xc0, 0x79, 0x9c, 0x97, 0x8e, 0xd0, 0xa2, 0x2b, 0x00, 0x7d, 0x6e, 0xd3, 0xb6,
- 0xea, 0xd0, 0x30, 0x5a, 0xe5, 0xfd, 0x03, 0x71, 0xce, 0x7f, 0xbc, 0x5e, 0xfb, 0xf9, 0xc0, 0x9f,
- 0x71, 0xd4, 0x16, 0x6f, 0xae, 0xe3, 0xd0, 0x1e, 0xf7, 0xc3, 0xcd, 0xc0, 0x22, 0x9c, 0x6c, 0xda,
- 0x1e, 0xa7, 0xa1, 0x47, 0x9c, 0x4d, 0xb1, 0x8a, 0x0a, 0xb0, 0xd3, 0xc6, 0x45, 0x4d, 0xdb, 0xb1,
- 0xd0, 0x77, 0x50, 0xb7, 0x42, 0x3f, 0x08, 0xa8, 0x65, 0xc6, 0xed, 0x91, 0x99, 0x3d, 0x7f, 0xe8,
- 0xf1, 0x7a, 0xa9, 0x61, 0xb4, 0x2a, 0x78, 0x45, 0xeb, 0xe3, 0x66, 0xca, 0x0e, 0x84, 0x16, 0xfd,
- 0x0c, 0x3e, 0xf5, 0x43, 0x7b, 0x60, 0x7b, 0xc4, 0x31, 0x03, 0x32, 0x72, 0x7c, 0x62, 0x99, 0x7d,
- 0x3f, 0x74, 0x09, 0xaf, 0x97, 0x65, 0x29, 0x7f, 0x12, 0xa9, 0xcf, 0x94, 0xf6, 0x48, 0x2a, 0x45,
- 0x99, 0xcc, 0xee, 0xab, 0x57, 0x84, 0x6f, 0x78, 0x71, 0x66, 0x03, 0xfa, 0x09, 0x2c, 0x8d, 0x5f,
- 0x81, 0x28, 0xd2, 0x55, 0x19, 0xe9, 0x5a, 0xac, 0xd0, 0x35, 0xd5, 0xfc, 0x03, 0x54, 0xa6, 0xfa,
- 0x3d, 0xfa, 0x16, 0x56, 0xc6, 0xbb, 0x9f, 0xd1, 0x91, 0x4e, 0x17, 0xbd, 0x95, 0x1d, 0x21, 0x8b,
- 0x97, 0x63, 0xed, 0x23, 0x3a, 0x3a, 0xd7, 0x3a, 0x74, 0x0f, 0x2a, 0xe2, 0xbd, 0x19, 0x83, 0x53,
- 0x12, 0x5c, 0x16, 0xc2, 0x08, 0xd4, 0xfc, 0x9b, 0x01, 0x19, 0xf1, 0x5a, 0xa2, 0xa7, 0x50, 0xe0,
- 0x21, 0xe9, 0xc9, 0x04, 0x19, 0x32, 0x41, 0x7b, 0x3a, 0x41, 0xbb, 0xc9, 0x13, 0xd4, 0x15, 0x4c,
- 0x9d, 0x36, 0xce, 0x4b, 0xca, 0x8e, 0x85, 0x9e, 0x40, 0x9e, 0x05, 0xc4, 0x13, 0xe4, 0x29, 0x49,
- 0xfe, 0x2b, 0x4d, 0xfe, 0x5d, 0x72, 0xf2, 0xf3, 0x80, 0x78, 0x9d, 0x36, 0xce, 0x09, 0xc2, 0x8e,
- 0xd5, 0xfc, 0xbb, 0x01, 0xc5, 0xb8, 0xfa, 0x85, 0xd3, 0xb2, 0x2a, 0x67, 0x22, 0x54, 0x16, 0xc2,
- 0x44, 0x91, 0x41, 0x7f, 0x84, 0x4f, 0xc9, 0x60, 0x10, 0xd2, 0x81, 0x9e, 0xa0, 0xa8, 0x1b, 0xf8,
- 0x21, 0x71, 0x6c, 0x3e, 0x92, 0x0d, 0xae, 0xba, 0xbd, 0x9f, 0xe8, 0x01, 0x1f, 0x53, 0x75, 0xc7,
- 0x4c, 0x78, 0x85, 0xcc, 0x95, 0x37, 0x9f, 0xa7, 0x20, 0xa7, 0x2e, 0x11, 0xda, 0x86, 0x4f, 0xa2,
- 0xa6, 0xc3, 0x4c, 0xc6, 0x49, 0xc8, 0xcd, 0x49, 0xcf, 0x7e, 0x10, 0x2b, 0xcf, 0x85, 0x4e, 0xdd,
- 0xb7, 0x89, 0x06, 0xc6, 0x4c, 0x87, 0x7a, 0x03, 0x7e, 0xad, 0x7d, 0x8c, 0x1b, 0x18, 0x3b, 0x96,
- 0x62, 0xb4, 0x0c, 0xd9, 0x1b, 0x11, 0x3d, 0xd9, 0xe0, 0xd2, 0x58, 0x2d, 0xe6, 0xd7, 0x6b, 0x66,
- 0x7e, 0xbd, 0xa2, 0x35, 0x3d, 0xae, 0xa9, 0x63, 0x89, 0x06, 0x97, 0xfd, 0x7e, 0x41, 0x8d, 0x5f,
- 0xea, 0x38, 0xf7, 0x61, 0x59, 0xb4, 0x44, 0xc6, 0x89, 0x1b, 0x30, 0x31, 0x04, 0xdd, 0xca, 0x66,
- 0x28, 0x07, 0xac, 0x0c, 0x46, 0x63, 0xdd, 0x85, 0x67, 0xdf, 0x8a, 0x8e, 0xb8, 0x5f, 0x81, 0xd2,
- 0x98, 0xd2, 0x6c, 0xfe, 0x27, 0x05, 0x79, 0x3d, 0xe4, 0x8a, 0x59, 0xce, 0xa5, 0xae, 0x1f, 0x8e,
- 0x54, 0x30, 0x64, 0x18, 0x32, 0xb8, 0xa4, 0x64, 0x32, 0x06, 0x13, 0x10, 0xc7, 0x76, 0x6d, 0x2e,
- 0x5d, 0x8f, 0x21, 0xc7, 0x42, 0x84, 0xd6, 0xa0, 0x24, 0xdb, 0x91, 0xdf, 0xef, 0x33, 0xca, 0x65,
- 0x46, 0x33, 0x18, 0x84, 0xe8, 0x54, 0x4a, 0x44, 0x04, 0xc4, 0xca, 0x23, 0xee, 0x44, 0x31, 0x65,
- 0x64, 0x0c, 0x6b, 0x91, 0x22, 0xae, 0x95, 0xb9, 0xe1, 0xca, 0xde, 0x11, 0xae, 0x7b, 0x50, 0xb9,
- 0x26, 0xcc, 0x8c, 0x46, 0x5e, 0x26, 0xdb, 0x7d, 0x01, 0x97, 0xaf, 0x09, 0x8b, 0x06, 0xe2, 0x31,
- 0x48, 0x5b, 0x62, 0xb2, 0x75, 0x6b, 0x50, 0x24, 0x43, 0x2d, 0xa8, 0x09, 0x90, 0x63, 0x7b, 0xd4,
- 0xf4, 0x86, 0xee, 0x15, 0x0d, 0x45, 0xfb, 0x16, 0xb8, 0xea, 0x35, 0x61, 0xc7, 0xb6, 0x47, 0x4f,
- 0x94, 0x14, 0xad, 0xc3, 0x92, 0x40, 0xda, 0x9e, 0xc4, 0xf6, 0x43, 0x49, 0x59, 0x94, 0xd0, 0xc5,
- 0x6b, 0xc2, 0x3a, 0x52, 0x7e, 0x24, 0xc5, 0xcd, 0xff, 0x1a, 0x50, 0x88, 0xc6, 0x7e, 0xf4, 0xe5,
- 0xf8, 0xf3, 0x64, 0xa2, 0xea, 0xbe, 0x5f, 0x88, 0xbf, 0x35, 0x54, 0x86, 0xeb, 0x90, 0x27, 0x96,
- 0x15, 0x52, 0xc6, 0x74, 0xb0, 0xa3, 0x25, 0x6a, 0x43, 0x46, 0x70, 0xeb, 0xcf, 0x83, 0xa4, 0x53,
- 0x3c, 0xc5, 0x72, 0x37, 0xfa, 0x11, 0x14, 0x6d, 0x66, 0xf6, 0x7d, 0xc7, 0xa2, 0x96, 0xcc, 0x42,
- 0x01, 0x17, 0x6c, 0x76, 0x24, 0xd7, 0x89, 0xa2, 0xbf, 0x5f, 0x83, 0xea, 0x94, 0x43, 0x66, 0xf3,
- 0x89, 0xec, 0x80, 0x54, 0xbc, 0xd5, 0xf1, 0x27, 0xcd, 0xe4, 0x0d, 0x8b, 0x3f, 0x4e, 0x94, 0xab,
- 0x48, 0x3b, 0x94, 0x92, 0x6f, 0xa9, 0x3a, 0xde, 0x0a, 0xe4, 0x7a, 0xbe, 0x33, 0x74, 0x3d, 0x59,
- 0x48, 0x69, 0xac, 0x57, 0xcd, 0xbf, 0x1a, 0x50, 0x88, 0x72, 0x2a, 0x52, 0x3a, 0x5d, 0x4d, 0xba,
- 0x35, 0x4d, 0x55, 0xd2, 0x7d, 0x58, 0x66, 0x23, 0xc6, 0xa9, 0x6b, 0x4e, 0x63, 0xd5, 0xed, 0x45,
- 0x4a, 0x77, 0x32, 0x53, 0x7b, 0xef, 0x17, 0x6a, 0xfa, 0x8e, 0x42, 0x15, 0x83, 0x9a, 0x6c, 0x21,
- 0xd2, 0x05, 0x3d, 0xae, 0x48, 0x89, 0x08, 0xc1, 0xfa, 0x73, 0x03, 0x56, 0xe6, 0x77, 0x2a, 0xf4,
- 0x15, 0xdc, 0xdb, 0x7b, 0xf0, 0x00, 0x1f, 0x3e, 0xd8, 0xeb, 0x76, 0x4e, 0x4f, 0xcc, 0xee, 0xe1,
- 0xe3, 0xb3, 0x53, 0xbc, 0x77, 0xdc, 0xe9, 0x3e, 0x31, 0x2f, 0x4e, 0xce, 0xcf, 0x0e, 0x0f, 0x3a,
- 0x47, 0x9d, 0xc3, 0x76, 0x6d, 0x01, 0x7d, 0x01, 0x9f, 0xdf, 0x05, 0x6c, 0x1f, 0x1e, 0x77, 0xf7,
- 0x6a, 0x06, 0xfa, 0x31, 0x34, 0xef, 0x82, 0x1c, 0x5c, 0x3c, 0xbe, 0x38, 0xde, 0xeb, 0x76, 0x2e,
- 0x0f, 0x6b, 0xa9, 0xfd, 0x57, 0xc6, 0x8b, 0x37, 0xab, 0xc6, 0xcb, 0x37, 0xab, 0xc6, 0xbf, 0xde,
- 0xac, 0x1a, 0x7f, 0x7a, 0xbb, 0xba, 0xf0, 0xf2, 0xed, 0xea, 0xc2, 0xab, 0xb7, 0xab, 0x0b, 0xf0,
- 0x8d, 0xed, 0x27, 0x28, 0xa5, 0xfd, 0x4a, 0x34, 0x4c, 0x9f, 0x09, 0xd4, 0x99, 0xf1, 0xdb, 0x5f,
- 0x27, 0x7e, 0x77, 0xd4, 0x17, 0xfe, 0x80, 0x7a, 0x77, 0xfc, 0x37, 0xe2, 0xcf, 0xa9, 0xf5, 0xd3,
- 0x80, 0x7a, 0xdd, 0x98, 0x50, 0x9a, 0x8a, 0x3f, 0x6f, 0x36, 0x2e, 0xb7, 0xda, 0x63, 0xf0, 0x55,
- 0x4e, 0xb2, 0xed, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x89, 0x14, 0x57, 0x2d, 0xef, 0x10, 0x00,
- 0x00,
-}
-
-func (m *ProfilesDictionary) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ProfilesDictionary) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ProfilesDictionary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.AttributeUnits) > 0 {
- for iNdEx := len(m.AttributeUnits) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.AttributeUnits[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if len(m.AttributeTable) > 0 {
- for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- if len(m.StringTable) > 0 {
- for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.StringTable[iNdEx])
- copy(dAtA[i:], m.StringTable[iNdEx])
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.StringTable[iNdEx])))
- i--
- dAtA[i] = 0x2a
- }
- }
- if len(m.LinkTable) > 0 {
- for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.FunctionTable) > 0 {
- for iNdEx := len(m.FunctionTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.FunctionTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.LocationTable) > 0 {
- for iNdEx := len(m.LocationTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.LocationTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.MappingTable) > 0 {
- for iNdEx := len(m.MappingTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.MappingTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ProfilesData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if len(m.ResourceProfiles) > 0 {
- for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeProfiles) > 0 {
- for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeProfiles) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Profiles) > 0 {
- for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Profile) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Profile) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.AttributeIndices) > 0 {
- dAtA5 := make([]byte, len(m.AttributeIndices)*10)
- var j4 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j4++
- }
- dAtA5[j4] = uint8(num)
- j4++
- }
- i -= j4
- copy(dAtA[i:], dAtA5[:j4])
- i = encodeVarintProfiles(dAtA, i, uint64(j4))
- i--
- dAtA[i] = 0x72
- }
- if len(m.OriginalPayload) > 0 {
- i -= len(m.OriginalPayload)
- copy(dAtA[i:], m.OriginalPayload)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload)))
- i--
- dAtA[i] = 0x6a
- }
- if len(m.OriginalPayloadFormat) > 0 {
- i -= len(m.OriginalPayloadFormat)
- copy(dAtA[i:], m.OriginalPayloadFormat)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat)))
- i--
- dAtA[i] = 0x62
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x58
- }
- {
- size := m.ProfileId.Size()
- i -= size
- if _, err := m.ProfileId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- if m.DefaultSampleTypeIndex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.DefaultSampleTypeIndex))
- i--
- dAtA[i] = 0x48
- }
- if len(m.CommentStrindices) > 0 {
- dAtA7 := make([]byte, len(m.CommentStrindices)*10)
- var j6 int
- for _, num1 := range m.CommentStrindices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j6++
- }
- dAtA7[j6] = uint8(num)
- j6++
- }
- i -= j6
- copy(dAtA[i:], dAtA7[:j6])
- i = encodeVarintProfiles(dAtA, i, uint64(j6))
- i--
- dAtA[i] = 0x42
- }
- if m.Period != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Period))
- i--
- dAtA[i] = 0x38
- }
- {
- size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- if m.DurationNanos != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.DurationNanos))
- i--
- dAtA[i] = 0x28
- }
- if m.TimeNanos != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.TimeNanos))
- i--
- dAtA[i] = 0x20
- }
- if len(m.LocationIndices) > 0 {
- dAtA10 := make([]byte, len(m.LocationIndices)*10)
- var j9 int
- for _, num1 := range m.LocationIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA10[j9] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j9++
- }
- dAtA10[j9] = uint8(num)
- j9++
- }
- i -= j9
- copy(dAtA[i:], dAtA10[:j9])
- i = encodeVarintProfiles(dAtA, i, uint64(j9))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Sample) > 0 {
- for iNdEx := len(m.Sample) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Sample[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.SampleType) > 0 {
- for iNdEx := len(m.SampleType) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.SampleType[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AttributeUnit) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AttributeUnit) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AttributeUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.UnitStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex))
- i--
- dAtA[i] = 0x10
- }
- if m.AttributeKeyStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.AttributeKeyStrindex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Link) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Link) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ValueType) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ValueType) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.AggregationTemporality != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x18
- }
- if m.UnitStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex))
- i--
- dAtA[i] = 0x10
- }
- if m.TypeStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.TypeStrindex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Sample) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.TimestampsUnixNano) > 0 {
- dAtA12 := make([]byte, len(m.TimestampsUnixNano)*10)
- var j11 int
- for _, num := range m.TimestampsUnixNano {
- for num >= 1<<7 {
- dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j11++
- }
- dAtA12[j11] = uint8(num)
- j11++
- }
- i -= j11
- copy(dAtA[i:], dAtA12[:j11])
- i = encodeVarintProfiles(dAtA, i, uint64(j11))
- i--
- dAtA[i] = 0x32
- }
- if m.LinkIndex_ != nil {
- {
- size := m.LinkIndex_.Size()
- i -= size
- if _, err := m.LinkIndex_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.AttributeIndices) > 0 {
- dAtA14 := make([]byte, len(m.AttributeIndices)*10)
- var j13 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA14[j13] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j13++
- }
- dAtA14[j13] = uint8(num)
- j13++
- }
- i -= j13
- copy(dAtA[i:], dAtA14[:j13])
- i = encodeVarintProfiles(dAtA, i, uint64(j13))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Value) > 0 {
- dAtA16 := make([]byte, len(m.Value)*10)
- var j15 int
- for _, num1 := range m.Value {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA16[j15] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j15++
- }
- dAtA16[j15] = uint8(num)
- j15++
- }
- i -= j15
- copy(dAtA[i:], dAtA16[:j15])
- i = encodeVarintProfiles(dAtA, i, uint64(j15))
- i--
- dAtA[i] = 0x1a
- }
- if m.LocationsLength != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsLength))
- i--
- dAtA[i] = 0x10
- }
- if m.LocationsStartIndex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsStartIndex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Sample_LinkIndex) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sample_LinkIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintProfiles(dAtA, i, uint64(m.LinkIndex))
- i--
- dAtA[i] = 0x28
- return len(dAtA) - i, nil
-}
-func (m *Mapping) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Mapping) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.HasInlineFrames {
- i--
- if m.HasInlineFrames {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x48
- }
- if m.HasLineNumbers {
- i--
- if m.HasLineNumbers {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- if m.HasFilenames {
- i--
- if m.HasFilenames {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x38
- }
- if m.HasFunctions {
- i--
- if m.HasFunctions {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- if len(m.AttributeIndices) > 0 {
- dAtA18 := make([]byte, len(m.AttributeIndices)*10)
- var j17 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA18[j17] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j17++
- }
- dAtA18[j17] = uint8(num)
- j17++
- }
- i -= j17
- copy(dAtA[i:], dAtA18[:j17])
- i = encodeVarintProfiles(dAtA, i, uint64(j17))
- i--
- dAtA[i] = 0x2a
- }
- if m.FilenameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex))
- i--
- dAtA[i] = 0x20
- }
- if m.FileOffset != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FileOffset))
- i--
- dAtA[i] = 0x18
- }
- if m.MemoryLimit != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryLimit))
- i--
- dAtA[i] = 0x10
- }
- if m.MemoryStart != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryStart))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Location) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Location) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.AttributeIndices) > 0 {
- dAtA20 := make([]byte, len(m.AttributeIndices)*10)
- var j19 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA20[j19] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j19++
- }
- dAtA20[j19] = uint8(num)
- j19++
- }
- i -= j19
- copy(dAtA[i:], dAtA20[:j19])
- i = encodeVarintProfiles(dAtA, i, uint64(j19))
- i--
- dAtA[i] = 0x2a
- }
- if m.IsFolded {
- i--
- if m.IsFolded {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if len(m.Line) > 0 {
- for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Line[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Address != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Address))
- i--
- dAtA[i] = 0x10
- }
- if m.MappingIndex_ != nil {
- {
- size := m.MappingIndex_.Size()
- i -= size
- if _, err := m.MappingIndex_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Location_MappingIndex) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Location_MappingIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintProfiles(dAtA, i, uint64(m.MappingIndex))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-func (m *Line) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Line) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Column != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Column))
- i--
- dAtA[i] = 0x18
- }
- if m.Line != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Line))
- i--
- dAtA[i] = 0x10
- }
- if m.FunctionIndex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FunctionIndex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Function) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Function) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.StartLine != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.StartLine))
- i--
- dAtA[i] = 0x20
- }
- if m.FilenameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex))
- i--
- dAtA[i] = 0x18
- }
- if m.SystemNameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.SystemNameStrindex))
- i--
- dAtA[i] = 0x10
- }
- if m.NameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.NameStrindex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int {
- offset -= sovProfiles(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ProfilesDictionary) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.MappingTable) > 0 {
- for _, e := range m.MappingTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.LocationTable) > 0 {
- for _, e := range m.LocationTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.FunctionTable) > 0 {
- for _, e := range m.FunctionTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.LinkTable) > 0 {
- for _, e := range m.LinkTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.StringTable) > 0 {
- for _, s := range m.StringTable {
- l = len(s)
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.AttributeTable) > 0 {
- for _, e := range m.AttributeTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.AttributeUnits) > 0 {
- for _, e := range m.AttributeUnits {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- return n
-}
-
-func (m *ProfilesData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceProfiles) > 0 {
- for _, e := range m.ResourceProfiles {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- l = m.Dictionary.Size()
- n += 1 + l + sovProfiles(uint64(l))
- return n
-}
-
-func (m *ResourceProfiles) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if len(m.ScopeProfiles) > 0 {
- for _, e := range m.ScopeProfiles {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- return n
-}
-
-func (m *ScopeProfiles) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if len(m.Profiles) > 0 {
- for _, e := range m.Profiles {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- return n
-}
-
-func (m *Profile) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.SampleType) > 0 {
- for _, e := range m.SampleType {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.Sample) > 0 {
- for _, e := range m.Sample {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.LocationIndices) > 0 {
- l = 0
- for _, e := range m.LocationIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.TimeNanos != 0 {
- n += 1 + sovProfiles(uint64(m.TimeNanos))
- }
- if m.DurationNanos != 0 {
- n += 1 + sovProfiles(uint64(m.DurationNanos))
- }
- l = m.PeriodType.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if m.Period != 0 {
- n += 1 + sovProfiles(uint64(m.Period))
- }
- if len(m.CommentStrindices) > 0 {
- l = 0
- for _, e := range m.CommentStrindices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.DefaultSampleTypeIndex != 0 {
- n += 1 + sovProfiles(uint64(m.DefaultSampleTypeIndex))
- }
- l = m.ProfileId.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovProfiles(uint64(m.DroppedAttributesCount))
- }
- l = len(m.OriginalPayloadFormat)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- l = len(m.OriginalPayload)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- return n
-}
-
-func (m *AttributeUnit) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.AttributeKeyStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.AttributeKeyStrindex))
- }
- if m.UnitStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.UnitStrindex))
- }
- return n
-}
-
-func (m *Link) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.TraceId.Size()
- n += 1 + l + sovProfiles(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovProfiles(uint64(l))
- return n
-}
-
-func (m *ValueType) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TypeStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.TypeStrindex))
- }
- if m.UnitStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.UnitStrindex))
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovProfiles(uint64(m.AggregationTemporality))
- }
- return n
-}
-
-func (m *Sample) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.LocationsStartIndex != 0 {
- n += 1 + sovProfiles(uint64(m.LocationsStartIndex))
- }
- if m.LocationsLength != 0 {
- n += 1 + sovProfiles(uint64(m.LocationsLength))
- }
- if len(m.Value) > 0 {
- l = 0
- for _, e := range m.Value {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.LinkIndex_ != nil {
- n += m.LinkIndex_.Size()
- }
- if len(m.TimestampsUnixNano) > 0 {
- l = 0
- for _, e := range m.TimestampsUnixNano {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- return n
-}
-
-func (m *Sample_LinkIndex) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovProfiles(uint64(m.LinkIndex))
- return n
-}
-func (m *Mapping) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.MemoryStart != 0 {
- n += 1 + sovProfiles(uint64(m.MemoryStart))
- }
- if m.MemoryLimit != 0 {
- n += 1 + sovProfiles(uint64(m.MemoryLimit))
- }
- if m.FileOffset != 0 {
- n += 1 + sovProfiles(uint64(m.FileOffset))
- }
- if m.FilenameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.FilenameStrindex))
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.HasFunctions {
- n += 2
- }
- if m.HasFilenames {
- n += 2
- }
- if m.HasLineNumbers {
- n += 2
- }
- if m.HasInlineFrames {
- n += 2
- }
- return n
-}
-
-func (m *Location) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.MappingIndex_ != nil {
- n += m.MappingIndex_.Size()
- }
- if m.Address != 0 {
- n += 1 + sovProfiles(uint64(m.Address))
- }
- if len(m.Line) > 0 {
- for _, e := range m.Line {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if m.IsFolded {
- n += 2
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- return n
-}
-
-func (m *Location_MappingIndex) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovProfiles(uint64(m.MappingIndex))
- return n
-}
-func (m *Line) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.FunctionIndex != 0 {
- n += 1 + sovProfiles(uint64(m.FunctionIndex))
- }
- if m.Line != 0 {
- n += 1 + sovProfiles(uint64(m.Line))
- }
- if m.Column != 0 {
- n += 1 + sovProfiles(uint64(m.Column))
- }
- return n
-}
-
-func (m *Function) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.NameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.NameStrindex))
- }
- if m.SystemNameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.SystemNameStrindex))
- }
- if m.FilenameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.FilenameStrindex))
- }
- if m.StartLine != 0 {
- n += 1 + sovProfiles(uint64(m.StartLine))
- }
- return n
-}
-
-func sovProfiles(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozProfiles(x uint64) (n int) {
- return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ProfilesDictionary) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ProfilesDictionary: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ProfilesDictionary: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.MappingTable = append(m.MappingTable, &Mapping{})
- if err := m.MappingTable[len(m.MappingTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LocationTable = append(m.LocationTable, &Location{})
- if err := m.LocationTable[len(m.LocationTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FunctionTable = append(m.FunctionTable, &Function{})
- if err := m.FunctionTable[len(m.FunctionTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LinkTable = append(m.LinkTable, &Link{})
- if err := m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AttributeTable = append(m.AttributeTable, v1.KeyValue{})
- if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AttributeUnits = append(m.AttributeUnits, &AttributeUnit{})
- if err := m.AttributeUnits[len(m.AttributeUnits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ProfilesData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{})
- if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceProfiles) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{})
- if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeProfiles) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Profiles = append(m.Profiles, &Profile{})
- if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Profile) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Profile: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SampleType = append(m.SampleType, &ValueType{})
- if err := m.SampleType[len(m.SampleType)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Sample = append(m.Sample, &Sample{})
- if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LocationIndices = append(m.LocationIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.LocationIndices) == 0 {
- m.LocationIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LocationIndices = append(m.LocationIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType)
- }
- m.TimeNanos = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TimeNanos |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType)
- }
- m.DurationNanos = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DurationNanos |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
- }
- m.Period = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Period |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.CommentStrindices = append(m.CommentStrindices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.CommentStrindices) == 0 {
- m.CommentStrindices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.CommentStrindices = append(m.CommentStrindices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType)
- }
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleTypeIndex", wireType)
- }
- m.DefaultSampleTypeIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DefaultSampleTypeIndex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ProfileId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 13:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.OriginalPayload = append(m.OriginalPayload[:0], dAtA[iNdEx:postIndex]...)
- if m.OriginalPayload == nil {
- m.OriginalPayload = []byte{}
- }
- iNdEx = postIndex
- case 14:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AttributeUnit) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AttributeUnit: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AttributeUnit: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeKeyStrindex", wireType)
- }
- m.AttributeKeyStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AttributeKeyStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
- }
- m.UnitStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UnitStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Link) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Link: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ValueType) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ValueType: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ValueType: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType)
- }
- m.TypeStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TypeStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
- }
- m.UnitStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UnitStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Sample) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Sample: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType)
- }
- m.LocationsStartIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LocationsStartIndex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType)
- }
- m.LocationsLength = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LocationsLength |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType == 0 {
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Value = append(m.Value, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.Value) == 0 {
- m.Value = make([]int64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Value = append(m.Value, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- case 4:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LinkIndex_ = &Sample_LinkIndex{v}
- case 6:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TimestampsUnixNano = append(m.TimestampsUnixNano, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.TimestampsUnixNano) == 0 {
- m.TimestampsUnixNano = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TimestampsUnixNano = append(m.TimestampsUnixNano, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Mapping) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Mapping: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Mapping: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType)
- }
- m.MemoryStart = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemoryStart |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType)
- }
- m.MemoryLimit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemoryLimit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType)
- }
- m.FileOffset = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FileOffset |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
- }
- m.FilenameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FilenameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasFunctions", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasFunctions = bool(v != 0)
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasFilenames", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasFilenames = bool(v != 0)
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasLineNumbers", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasLineNumbers = bool(v != 0)
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasInlineFrames", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasInlineFrames = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Location) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Location: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.MappingIndex_ = &Location_MappingIndex{v}
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
- }
- m.Address = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Address |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Line = append(m.Line, &Line{})
- if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsFolded", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsFolded = bool(v != 0)
- case 5:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Line) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Line: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Line: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType)
- }
- m.FunctionIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FunctionIndex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
- }
- m.Line = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Line |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType)
- }
- m.Column = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Column |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Function) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Function: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType)
- }
- m.NameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType)
- }
- m.SystemNameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SystemNameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
- }
- m.FilenameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FilenameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType)
- }
- m.StartLine = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StartLine |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipProfiles(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthProfiles
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupProfiles
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthProfiles
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go
deleted file mode 100644
index eedc2c0a4..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go
+++ /dev/null
@@ -1,450 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/resource/v1/resource.proto
-
-package v1
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// Resource information.
-type Resource struct {
- // Set of attributes that describe the resource.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
- // no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Set of entities that participate in this Resource.
- //
- // Note: keys in the references MUST exist in attributes of this message.
- //
- // Status: [Development]
- EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"`
-}
-
-func (m *Resource) Reset() { *m = Resource{} }
-func (m *Resource) String() string { return proto.CompactTextString(m) }
-func (*Resource) ProtoMessage() {}
-func (*Resource) Descriptor() ([]byte, []int) {
- return fileDescriptor_446f73eacf88f3f5, []int{0}
-}
-func (m *Resource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Resource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Resource.Merge(m, src)
-}
-func (m *Resource) XXX_Size() int {
- return m.Size()
-}
-func (m *Resource) XXX_DiscardUnknown() {
- xxx_messageInfo_Resource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Resource proto.InternalMessageInfo
-
-func (m *Resource) GetAttributes() []v1.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Resource) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Resource) GetEntityRefs() []*v1.EntityRef {
- if m != nil {
- return m.EntityRefs
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5)
-}
-
-var fileDescriptor_446f73eacf88f3f5 = []byte{
- // 334 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
- 0x10, 0xc6, 0xb3, 0xfa, 0xe7, 0x4f, 0x59, 0xf1, 0x12, 0x4a, 0x09, 0x1e, 0xa2, 0x78, 0xa9, 0xf4,
- 0xb0, 0x21, 0xed, 0xa5, 0xd7, 0x5a, 0x5a, 0x28, 0xa5, 0x54, 0x42, 0xf1, 0xd0, 0x8b, 0xc4, 0x38,
- 0x86, 0x40, 0xdc, 0x09, 0x9b, 0x89, 0xe0, 0x5b, 0xf4, 0x39, 0xfa, 0x02, 0x7d, 0x05, 0x8f, 0x1e,
- 0x7b, 0x92, 0xa2, 0x2f, 0x52, 0xb2, 0x31, 0xa9, 0x2d, 0x82, 0xb7, 0x6f, 0xe7, 0xfb, 0xe6, 0x37,
- 0xc3, 0x2c, 0x17, 0x98, 0x80, 0x24, 0x88, 0x61, 0x06, 0xa4, 0x16, 0x4e, 0xa2, 0x90, 0xd0, 0x51,
- 0x90, 0x62, 0xa6, 0x02, 0x70, 0xe6, 0x6e, 0xa5, 0x85, 0xb6, 0xcc, 0xf6, 0xaf, 0x7c, 0x51, 0x14,
- 0x55, 0x66, 0xee, 0xb6, 0x4e, 0x43, 0x0c, 0xb1, 0xc0, 0xe4, 0xaa, 0x48, 0xb4, 0x2e, 0x0e, 0x8d,
- 0x09, 0x70, 0x36, 0x43, 0x99, 0x0f, 0x29, 0x54, 0x91, 0xed, 0xae, 0x19, 0x3f, 0xf1, 0x76, 0x44,
- 0xf3, 0x89, 0x73, 0x9f, 0x48, 0x45, 0xe3, 0x8c, 0x20, 0xb5, 0x58, 0xa7, 0xde, 0x6b, 0x5c, 0x9e,
- 0x8b, 0x43, 0x4b, 0xec, 0x18, 0x73, 0x57, 0x3c, 0xc2, 0x62, 0xe8, 0xc7, 0x19, 0xf4, 0xff, 0x2d,
- 0xd7, 0x6d, 0xc3, 0xdb, 0x03, 0x98, 0xd7, 0xdc, 0x9a, 0x28, 0x4c, 0x12, 0x98, 0x8c, 0x7e, 0xaa,
- 0xa3, 0x00, 0x33, 0x49, 0x56, 0xad, 0xc3, 0x7a, 0x4d, 0xef, 0x6c, 0xe7, 0xdf, 0x54, 0xf6, 0x6d,
- 0xee, 0x9a, 0x0f, 0xbc, 0x01, 0x92, 0x22, 0x5a, 0x8c, 0x14, 0x4c, 0x53, 0xab, 0xae, 0x37, 0xe9,
- 0x1d, 0xd9, 0xe4, 0x4e, 0x77, 0x78, 0x30, 0xf5, 0x38, 0x94, 0x32, 0xed, 0x7f, 0xb0, 0xe5, 0xc6,
- 0x66, 0xab, 0x8d, 0xcd, 0xbe, 0x36, 0x36, 0x7b, 0xdb, 0xda, 0xc6, 0x6a, 0x6b, 0x1b, 0x9f, 0x5b,
- 0xdb, 0xe0, 0xdd, 0x08, 0xc5, 0x91, 0x0b, 0xf7, 0x9b, 0xe5, 0x71, 0x06, 0xb9, 0x35, 0x60, 0xaf,
- 0xf7, 0xe1, 0xdf, 0xa6, 0x28, 0x3f, 0x6e, 0x1c, 0x43, 0x40, 0xa8, 0x9c, 0x64, 0xe2, 0x93, 0xef,
- 0x44, 0x92, 0x40, 0x49, 0x3f, 0x76, 0xf4, 0x4b, 0x53, 0x43, 0x90, 0xfb, 0x5f, 0xfd, 0x5e, 0x6b,
- 0x3f, 0x27, 0x20, 0x5f, 0x2a, 0x8a, 0xe6, 0x8b, 0x72, 0x9a, 0x18, 0xba, 0xe3, 0xff, 0xba, 0xef,
- 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x10, 0xa9, 0xec, 0x36, 0x02, 0x00, 0x00,
-}
-
-func (m *Resource) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Resource) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.EntityRefs) > 0 {
- for iNdEx := len(m.EntityRefs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.EntityRefs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintResource(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintResource(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintResource(dAtA []byte, offset int, v uint64) int {
- offset -= sovResource(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Resource) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovResource(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovResource(uint64(m.DroppedAttributesCount))
- }
- if len(m.EntityRefs) > 0 {
- for _, e := range m.EntityRefs {
- l = e.Size()
- n += 1 + l + sovResource(uint64(l))
- }
- }
- return n
-}
-
-func sovResource(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozResource(x uint64) (n int) {
- return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Resource) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Resource: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthResource
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthResource
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v1.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthResource
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthResource
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EntityRefs = append(m.EntityRefs, &v1.EntityRef{})
- if err := m.EntityRefs[len(m.EntityRefs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipResource(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthResource
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipResource(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowResource
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowResource
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowResource
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthResource
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupResource
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthResource
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowResource = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go
deleted file mode 100644
index b0bddfb98..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go
+++ /dev/null
@@ -1,3045 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/trace/v1/trace.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// SpanFlags represents constants used to interpret the
-// Span.flags field, which is protobuf 'fixed32' type and is to
-// be used as bit-fields. Each non-zero value defined in this enum is
-// a bit-mask. To extract the bit-field, for example, use an
-// expression like:
-//
-// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
-//
-// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
-//
-// Note that Span flags were introduced in version 1.1 of the
-// OpenTelemetry protocol. Older Span producers do not set this
-// field, consequently consumers should not rely on the absence of a
-// particular flag bit to indicate the presence of a particular feature.
-type SpanFlags int32
-
-const (
- // The zero value for the enum. Should not be used for comparisons.
- // Instead use bitwise "and" with the appropriate mask as shown above.
- SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
- // Bits 0-7 are used for trace flags.
- SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
- // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
- // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
- // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
- SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256
- SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512
-)
-
-var SpanFlags_name = map[int32]string{
- 0: "SPAN_FLAGS_DO_NOT_USE",
- 255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
- 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK",
- 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK",
-}
-
-var SpanFlags_value = map[string]int32{
- "SPAN_FLAGS_DO_NOT_USE": 0,
- "SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
- "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256,
- "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512,
-}
-
-func (x SpanFlags) String() string {
- return proto.EnumName(SpanFlags_name, int32(x))
-}
-
-func (SpanFlags) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{0}
-}
-
-// SpanKind is the type of span. Can be used to specify additional relationships between spans
-// in addition to a parent/child relationship.
-type Span_SpanKind int32
-
-const (
- // Unspecified. Do NOT use as default.
- // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
- Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
- // Indicates that the span represents an internal operation within an application,
- // as opposed to an operation happening at the boundaries. Default value.
- Span_SPAN_KIND_INTERNAL Span_SpanKind = 1
- // Indicates that the span covers server-side handling of an RPC or other
- // remote network request.
- Span_SPAN_KIND_SERVER Span_SpanKind = 2
- // Indicates that the span describes a request to some remote service.
- Span_SPAN_KIND_CLIENT Span_SpanKind = 3
- // Indicates that the span describes a producer sending a message to a broker.
- // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
- // between producer and consumer spans. A PRODUCER span ends when the message was accepted
- // by the broker while the logical processing of the message might span a much longer time.
- Span_SPAN_KIND_PRODUCER Span_SpanKind = 4
- // Indicates that the span describes consumer receiving a message from a broker.
- // Like the PRODUCER kind, there is often no direct critical path latency relationship
- // between producer and consumer spans.
- Span_SPAN_KIND_CONSUMER Span_SpanKind = 5
-)
-
-var Span_SpanKind_name = map[int32]string{
- 0: "SPAN_KIND_UNSPECIFIED",
- 1: "SPAN_KIND_INTERNAL",
- 2: "SPAN_KIND_SERVER",
- 3: "SPAN_KIND_CLIENT",
- 4: "SPAN_KIND_PRODUCER",
- 5: "SPAN_KIND_CONSUMER",
-}
-
-var Span_SpanKind_value = map[string]int32{
- "SPAN_KIND_UNSPECIFIED": 0,
- "SPAN_KIND_INTERNAL": 1,
- "SPAN_KIND_SERVER": 2,
- "SPAN_KIND_CLIENT": 3,
- "SPAN_KIND_PRODUCER": 4,
- "SPAN_KIND_CONSUMER": 5,
-}
-
-func (x Span_SpanKind) String() string {
- return proto.EnumName(Span_SpanKind_name, int32(x))
-}
-
-func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3, 0}
-}
-
-// For the semantics of status codes see
-// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
-type Status_StatusCode int32
-
-const (
- // The default status.
- Status_STATUS_CODE_UNSET Status_StatusCode = 0
- // The Span has been validated by an Application developer or Operator to
- // have completed successfully.
- Status_STATUS_CODE_OK Status_StatusCode = 1
- // The Span contains an error.
- Status_STATUS_CODE_ERROR Status_StatusCode = 2
-)
-
-var Status_StatusCode_name = map[int32]string{
- 0: "STATUS_CODE_UNSET",
- 1: "STATUS_CODE_OK",
- 2: "STATUS_CODE_ERROR",
-}
-
-var Status_StatusCode_value = map[string]int32{
- "STATUS_CODE_UNSET": 0,
- "STATUS_CODE_OK": 1,
- "STATUS_CODE_ERROR": 2,
-}
-
-func (x Status_StatusCode) String() string {
- return proto.EnumName(Status_StatusCode_name, int32(x))
-}
-
-func (Status_StatusCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{4, 0}
-}
-
-// TracesData represents the traces data that can be stored in a persistent storage,
-// OR can be embedded by other protocols that transfer OTLP traces data but do
-// not implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type TracesData struct {
- // An array of ResourceSpans.
- // For data coming from a single resource this array will typically contain
- // one element. Intermediary nodes that receive data from multiple origins
- // typically batch the data before forwarding further and in that case this
- // array will contain multiple elements.
- ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
-}
-
-func (m *TracesData) Reset() { *m = TracesData{} }
-func (m *TracesData) String() string { return proto.CompactTextString(m) }
-func (*TracesData) ProtoMessage() {}
-func (*TracesData) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{0}
-}
-func (m *TracesData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TracesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_TracesData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *TracesData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TracesData.Merge(m, src)
-}
-func (m *TracesData) XXX_Size() int {
- return m.Size()
-}
-func (m *TracesData) XXX_DiscardUnknown() {
- xxx_messageInfo_TracesData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TracesData proto.InternalMessageInfo
-
-func (m *TracesData) GetResourceSpans() []*ResourceSpans {
- if m != nil {
- return m.ResourceSpans
- }
- return nil
-}
-
-// A collection of ScopeSpans from a Resource.
-type ResourceSpans struct {
- DeprecatedScopeSpans []*ScopeSpans `protobuf:"bytes,1000,rep,name=deprecated_scope_spans,json=deprecatedScopeSpans,proto3" json:"deprecated_scope_spans,omitempty"`
- // The resource for the spans in this message.
- // If this field is not set then no resource info is known.
- Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of ScopeSpans that originate from a resource.
- ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_spans" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceSpans) Reset() { *m = ResourceSpans{} }
-func (m *ResourceSpans) String() string { return proto.CompactTextString(m) }
-func (*ResourceSpans) ProtoMessage() {}
-func (*ResourceSpans) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{1}
-}
-func (m *ResourceSpans) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceSpans) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSpans.Merge(m, src)
-}
-func (m *ResourceSpans) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSpans) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSpans.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo
-
-func (m *ResourceSpans) GetDeprecatedScopeSpans() []*ScopeSpans {
- if m != nil {
- return m.DeprecatedScopeSpans
- }
- return nil
-}
-
-func (m *ResourceSpans) GetResource() v1.Resource {
- if m != nil {
- return m.Resource
- }
- return v1.Resource{}
-}
-
-func (m *ResourceSpans) GetScopeSpans() []*ScopeSpans {
- if m != nil {
- return m.ScopeSpans
- }
- return nil
-}
-
-func (m *ResourceSpans) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Spans produced by an InstrumentationScope.
-type ScopeSpans struct {
- // The instrumentation scope information for the spans in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of Spans that originate from an instrumentation scope.
- Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the span data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all spans and span events in the "spans" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeSpans) Reset() { *m = ScopeSpans{} }
-func (m *ScopeSpans) String() string { return proto.CompactTextString(m) }
-func (*ScopeSpans) ProtoMessage() {}
-func (*ScopeSpans) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{2}
-}
-func (m *ScopeSpans) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeSpans.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeSpans) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeSpans.Merge(m, src)
-}
-func (m *ScopeSpans) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeSpans) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeSpans.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeSpans proto.InternalMessageInfo
-
-func (m *ScopeSpans) GetScope() v11.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v11.InstrumentationScope{}
-}
-
-func (m *ScopeSpans) GetSpans() []*Span {
- if m != nil {
- return m.Spans
- }
- return nil
-}
-
-func (m *ScopeSpans) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A Span represents a single operation performed by a single component of the system.
-//
-// The next available field id is 17.
-type Span struct {
- // A unique identifier for a trace. All spans from the same trace share
- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
- // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is required.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for a span within a trace, assigned when the span
- // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
- // other than 8 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is required.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // trace_state conveys information about request position in multiple distributed tracing graphs.
- // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
- // See also https://github.com/w3c/distributed-tracing for more details about this field.
- TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
- // The `span_id` of this span's parent span. If this is a root span, then this
- // field must be empty. The ID is an 8-byte array.
- ParentSpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"parent_span_id"`
- // Flags, a bit field.
- //
- // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
- // Context specification. To read the 8-bit W3C trace flag, use
- // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
- //
- // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
- //
- // Bits 8 and 9 represent the 3 states of whether a span's parent
- // is remote. The states are (unknown, is not remote, is remote).
- // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
- // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
- //
- // When creating span messages, if the message is logically forwarded from another source
- // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
- // be copied as-is. If creating from a source that does not have an equivalent flags field
- // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
- // be set to zero.
- // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
- //
- // [Optional].
- Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
- // A description of the span's operation.
- //
- // For example, the name can be a qualified method name or a file name
- // and a line number where the operation is called. A best practice is to use
- // the same display name at the same call point in an application.
- // This makes it easier to correlate spans in different traces.
- //
- // This field is semantically required to be set to non-empty string.
- // Empty value is equivalent to an unknown span name.
- //
- // This field is required.
- Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
- // Distinguishes between spans generated in a particular context. For example,
- // two spans with the same name may be distinguished using `CLIENT` (caller)
- // and `SERVER` (callee) to identify queueing latency associated with the span.
- Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
- // start_time_unix_nano is the start time of the span. On the client side, this is the time
- // kept by the local machine where the span execution starts. On the server side, this
- // is the time when the server's application handler starts running.
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- //
- // This field is semantically required and it is expected that end_time >= start_time.
- StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // end_time_unix_nano is the end time of the span. On the client side, this is the time
- // kept by the local machine where the span execution ends. On the server side, this
- // is the time when the server application handler stops running.
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- //
- // This field is semantically required and it is expected that end_time >= start_time.
- EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
- // attributes is a collection of key/value pairs. Note, global attributes
- // like server name can be set using the resource API. Examples of attributes:
- //
- // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
- // "/http/server_latency": 300
- // "example.com/myattribute": true
- // "example.com/score": 10.239
- //
- // The OpenTelemetry API specification further restricts the allowed value types:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of attributes that were discarded. Attributes
- // can be discarded because their keys are too long or because there are too many
- // attributes. If this value is 0, then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // events is a collection of Event items.
- Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
- // dropped_events_count is the number of dropped events. If the value is 0, then no
- // events were dropped.
- DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
- // links is a collection of Links, which are references from this span to a span
- // in the same or different trace.
- Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
- // dropped_links_count is the number of dropped links after the maximum size was
- // enforced. If this value is 0, then no links were dropped.
- DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
- // An optional final status for this span. Semantically when Status isn't set, it means
- // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
- Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"`
-}
-
-func (m *Span) Reset() { *m = Span{} }
-func (m *Span) String() string { return proto.CompactTextString(m) }
-func (*Span) ProtoMessage() {}
-func (*Span) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3}
-}
-func (m *Span) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Span.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Span) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span.Merge(m, src)
-}
-func (m *Span) XXX_Size() int {
- return m.Size()
-}
-func (m *Span) XXX_DiscardUnknown() {
- xxx_messageInfo_Span.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span proto.InternalMessageInfo
-
-func (m *Span) GetTraceState() string {
- if m != nil {
- return m.TraceState
- }
- return ""
-}
-
-func (m *Span) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *Span) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Span) GetKind() Span_SpanKind {
- if m != nil {
- return m.Kind
- }
- return Span_SPAN_KIND_UNSPECIFIED
-}
-
-func (m *Span) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *Span) GetEndTimeUnixNano() uint64 {
- if m != nil {
- return m.EndTimeUnixNano
- }
- return 0
-}
-
-func (m *Span) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Span) GetEvents() []*Span_Event {
- if m != nil {
- return m.Events
- }
- return nil
-}
-
-func (m *Span) GetDroppedEventsCount() uint32 {
- if m != nil {
- return m.DroppedEventsCount
- }
- return 0
-}
-
-func (m *Span) GetLinks() []*Span_Link {
- if m != nil {
- return m.Links
- }
- return nil
-}
-
-func (m *Span) GetDroppedLinksCount() uint32 {
- if m != nil {
- return m.DroppedLinksCount
- }
- return 0
-}
-
-func (m *Span) GetStatus() Status {
- if m != nil {
- return m.Status
- }
- return Status{}
-}
-
-// Event is a time-stamped annotation of the span, consisting of user-supplied
-// text description and key-value pairs.
-type Span_Event struct {
- // time_unix_nano is the time the event occurred.
- TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // name of the event.
- // This field is semantically required to be set to non-empty string.
- Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- // attributes is a collection of attribute key/value pairs on the event.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0,
- // then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
-}
-
-func (m *Span_Event) Reset() { *m = Span_Event{} }
-func (m *Span_Event) String() string { return proto.CompactTextString(m) }
-func (*Span_Event) ProtoMessage() {}
-func (*Span_Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3, 0}
-}
-func (m *Span_Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Span_Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Event.Merge(m, src)
-}
-func (m *Span_Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Span_Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Event proto.InternalMessageInfo
-
-func (m *Span_Event) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *Span_Event) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Span_Event) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span_Event) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-// A pointer from the current span to another span in the same trace or in a
-// different trace. For example, this can be used in batching operations,
-// where a single batch handler processes multiple requests from different
-// traces or when the handler receives a request from a different project.
-type Span_Link struct {
- // A unique identifier of a trace that this linked span is part of. The ID is a
- // 16-byte array.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for the linked span. The ID is an 8-byte array.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // The trace_state associated with the link.
- TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
- // attributes is a collection of attribute key/value pairs on the link.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0,
- // then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Flags, a bit field.
- //
- // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
- // Context specification. To read the 8-bit W3C trace flag, use
- // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
- //
- // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
- //
- // Bits 8 and 9 represent the 3 states of whether the link is remote.
- // The states are (unknown, is not remote, is remote).
- // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
- // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
- //
- // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
- // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
- //
- // [Optional].
- Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
-}
-
-func (m *Span_Link) Reset() { *m = Span_Link{} }
-func (m *Span_Link) String() string { return proto.CompactTextString(m) }
-func (*Span_Link) ProtoMessage() {}
-func (*Span_Link) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3, 1}
-}
-func (m *Span_Link) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Span_Link) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Link.Merge(m, src)
-}
-func (m *Span_Link) XXX_Size() int {
- return m.Size()
-}
-func (m *Span_Link) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Link.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Link proto.InternalMessageInfo
-
-func (m *Span_Link) GetTraceState() string {
- if m != nil {
- return m.TraceState
- }
- return ""
-}
-
-func (m *Span_Link) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span_Link) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Span_Link) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-// The Status type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs.
-type Status struct {
- // A developer-facing human readable error message.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
- // The status code.
- Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"`
-}
-
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{4}
-}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
-}
-func (m *Status) XXX_Size() int {
- return m.Size()
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
-
-func (m *Status) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *Status) GetCode() Status_StatusCode {
- if m != nil {
- return m.Code
- }
- return Status_STATUS_CODE_UNSET
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.trace.v1.SpanFlags", SpanFlags_name, SpanFlags_value)
- proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value)
- proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value)
- proto.RegisterType((*TracesData)(nil), "opentelemetry.proto.trace.v1.TracesData")
- proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans")
- proto.RegisterType((*ScopeSpans)(nil), "opentelemetry.proto.trace.v1.ScopeSpans")
- proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span")
- proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event")
- proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link")
- proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601)
-}
-
-var fileDescriptor_5c407ac9c675a601 = []byte{
- // 1112 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0xcf, 0x6f, 0x1b, 0x45,
- 0x14, 0xf6, 0x3a, 0x6b, 0x3b, 0x79, 0x49, 0xdc, 0xed, 0xe0, 0x56, 0x4b, 0x28, 0x8e, 0xb1, 0x0a,
- 0x98, 0x56, 0xb2, 0x49, 0x7b, 0x29, 0x07, 0x44, 0x1d, 0x7b, 0x03, 0x8b, 0x13, 0x3b, 0x9a, 0x5d,
- 0x47, 0x80, 0x90, 0x96, 0xad, 0x77, 0x6a, 0x56, 0xb1, 0x67, 0xad, 0xdd, 0x71, 0xd4, 0xde, 0xf8,
- 0x13, 0xb8, 0x22, 0x71, 0x47, 0x02, 0xce, 0xdc, 0xb8, 0x57, 0x9c, 0x7a, 0x44, 0x1c, 0x2a, 0x94,
- 0x5c, 0xf8, 0x2f, 0x8a, 0x66, 0x66, 0xd7, 0x5e, 0x47, 0x91, 0xd3, 0x48, 0xf4, 0xc2, 0x25, 0x99,
- 0x79, 0x3f, 0xbe, 0xef, 0x7b, 0x6f, 0xde, 0x8c, 0x17, 0x6a, 0xc1, 0x84, 0x50, 0x46, 0x46, 0x64,
- 0x4c, 0x58, 0xf8, 0xb4, 0x31, 0x09, 0x03, 0x16, 0x34, 0x58, 0xe8, 0x0e, 0x48, 0xe3, 0x64, 0x47,
- 0x2e, 0xea, 0xc2, 0x88, 0x6e, 0x2d, 0x44, 0x4a, 0x63, 0x5d, 0x06, 0x9c, 0xec, 0x6c, 0x95, 0x86,
- 0xc1, 0x30, 0x90, 0xd9, 0x7c, 0x25, 0xdd, 0x5b, 0x77, 0x2e, 0x42, 0x1f, 0x04, 0xe3, 0x71, 0x40,
- 0x39, 0xbc, 0x5c, 0xc5, 0xb1, 0xf5, 0x8b, 0x62, 0x43, 0x12, 0x05, 0xd3, 0x50, 0x8a, 0x49, 0xd6,
- 0x32, 0xbe, 0xfa, 0x0d, 0x80, 0xcd, 0xd9, 0xa3, 0xb6, 0xcb, 0x5c, 0x84, 0xa1, 0x98, 0xf8, 0x9d,
- 0x68, 0xe2, 0xd2, 0x48, 0x57, 0x2a, 0x2b, 0xb5, 0xf5, 0x7b, 0x77, 0xeb, 0xcb, 0x64, 0xd7, 0x71,
- 0x9c, 0x63, 0xf1, 0x14, 0xbc, 0x19, 0xa6, 0xb7, 0xd5, 0x9f, 0xb2, 0xb0, 0xb9, 0x10, 0x80, 0x1c,
- 0xb8, 0xe9, 0x91, 0x49, 0x48, 0x06, 0x2e, 0x23, 0x9e, 0x13, 0x0d, 0x82, 0x49, 0xc2, 0xf6, 0x4f,
- 0x41, 0xd0, 0xd5, 0x96, 0xd3, 0x59, 0x3c, 0x43, 0x72, 0x95, 0xe6, 0x40, 0x73, 0x2b, 0xea, 0xc0,
- 0x6a, 0xa2, 0x41, 0x57, 0x2a, 0x4a, 0x6d, 0xfd, 0xde, 0x07, 0x17, 0x22, 0xce, 0x7a, 0x91, 0xaa,
- 0x61, 0x57, 0x7d, 0xf6, 0x62, 0x3b, 0x83, 0x67, 0x00, 0xc8, 0x84, 0xf5, 0xb4, 0xc4, 0xec, 0x15,
- 0x15, 0x42, 0x34, 0xd7, 0xf5, 0x36, 0x40, 0x34, 0xf8, 0x96, 0x8c, 0x5d, 0x67, 0x1a, 0x8e, 0xf4,
- 0x95, 0x8a, 0x52, 0x5b, 0xc3, 0x6b, 0xd2, 0xd2, 0x0f, 0x47, 0xd5, 0xdf, 0x14, 0x80, 0x54, 0x15,
- 0x3d, 0xc8, 0x89, 0xdc, 0xb8, 0x84, 0xfb, 0x17, 0x52, 0xc6, 0x87, 0x7f, 0xb2, 0x53, 0x37, 0x69,
- 0xc4, 0xc2, 0xe9, 0x98, 0x50, 0xe6, 0x32, 0x3f, 0xa0, 0x02, 0x28, 0x2e, 0x46, 0xe2, 0xa0, 0x07,
- 0x90, 0x4b, 0xd7, 0x50, 0xbd, 0xa4, 0x86, 0x89, 0x4b, 0xb1, 0x4c, 0xb8, 0x4c, 0xf8, 0xaf, 0x9b,
- 0xa0, 0xf2, 0x70, 0xf4, 0x35, 0xac, 0x8a, 0x7c, 0xc7, 0xf7, 0x84, 0xea, 0x8d, 0xdd, 0x26, 0x17,
- 0xf0, 0xd7, 0x8b, 0xed, 0x8f, 0x86, 0xc1, 0x39, 0x3a, 0x9f, 0xcf, 0xf0, 0x68, 0x44, 0x06, 0x2c,
- 0x08, 0x1b, 0x13, 0xcf, 0x65, 0x6e, 0xc3, 0xa7, 0x8c, 0x84, 0xd4, 0x1d, 0x35, 0xf8, 0xae, 0x2e,
- 0xe6, 0xd2, 0x6c, 0xe3, 0x82, 0x80, 0x34, 0x3d, 0xf4, 0x25, 0x14, 0xb8, 0x1c, 0x0e, 0x9e, 0x15,
- 0xe0, 0x0f, 0x63, 0xf0, 0x07, 0x57, 0x07, 0xe7, 0x72, 0xcd, 0x36, 0xce, 0x73, 0x40, 0xd3, 0x43,
- 0xdb, 0xb0, 0x2e, 0x85, 0x47, 0xcc, 0x65, 0x24, 0xae, 0x10, 0x84, 0xc9, 0xe2, 0x16, 0xf4, 0x18,
- 0x8a, 0x13, 0x37, 0x24, 0x94, 0x39, 0x89, 0x04, 0xf5, 0x3f, 0x92, 0xb0, 0x21, 0x71, 0x2d, 0x29,
- 0xa4, 0x04, 0xb9, 0xc7, 0x23, 0x77, 0x18, 0xe9, 0x5a, 0x45, 0xa9, 0x15, 0xb0, 0xdc, 0x20, 0x04,
- 0x2a, 0x75, 0xc7, 0x44, 0xcf, 0x09, 0x5d, 0x62, 0x8d, 0x3e, 0x01, 0xf5, 0xd8, 0xa7, 0x9e, 0x9e,
- 0xaf, 0x28, 0xb5, 0xe2, 0x65, 0x37, 0x94, 0xa3, 0x8b, 0x3f, 0x1d, 0x9f, 0x7a, 0x58, 0x24, 0xa2,
- 0x06, 0x94, 0x22, 0xe6, 0x86, 0xcc, 0x61, 0xfe, 0x98, 0x38, 0x53, 0xea, 0x3f, 0x71, 0xa8, 0x4b,
- 0x03, 0xbd, 0x50, 0x51, 0x6a, 0x79, 0x7c, 0x5d, 0xf8, 0x6c, 0x7f, 0x4c, 0xfa, 0xd4, 0x7f, 0xd2,
- 0x75, 0x69, 0x80, 0xee, 0x02, 0x22, 0xd4, 0x3b, 0x1f, 0xbe, 0x2a, 0xc2, 0xaf, 0x11, 0xea, 0x2d,
- 0x04, 0x1f, 0x00, 0xb8, 0x8c, 0x85, 0xfe, 0xa3, 0x29, 0x23, 0x91, 0xbe, 0x26, 0x26, 0xee, 0xfd,
- 0x4b, 0x46, 0xb8, 0x43, 0x9e, 0x1e, 0xb9, 0xa3, 0x69, 0x32, 0xb6, 0x29, 0x00, 0xf4, 0x00, 0x74,
- 0x2f, 0x0c, 0x26, 0x13, 0xe2, 0x39, 0x73, 0xab, 0x33, 0x08, 0xa6, 0x94, 0xe9, 0x50, 0x51, 0x6a,
- 0x9b, 0xf8, 0x66, 0xec, 0x6f, 0xce, 0xdc, 0x2d, 0xee, 0x45, 0x0f, 0x21, 0x4f, 0x4e, 0x08, 0x65,
- 0x91, 0xbe, 0xfe, 0x4a, 0x57, 0x97, 0x77, 0xca, 0xe0, 0x09, 0x38, 0xce, 0x43, 0x1f, 0x42, 0x29,
- 0xe1, 0x96, 0x96, 0x98, 0x77, 0x43, 0xf0, 0xa2, 0xd8, 0x27, 0x72, 0x62, 0xce, 0x8f, 0x21, 0x37,
- 0xf2, 0xe9, 0x71, 0xa4, 0x6f, 0x2e, 0xa9, 0x7b, 0x91, 0x72, 0xdf, 0xa7, 0xc7, 0x58, 0x66, 0xa1,
- 0x3a, 0xbc, 0x91, 0x10, 0x0a, 0x43, 0xcc, 0x57, 0x14, 0x7c, 0xd7, 0x63, 0x17, 0x4f, 0x88, 0xe9,
- 0x76, 0x21, 0xcf, 0xe7, 0x76, 0x1a, 0xe9, 0xd7, 0xc4, 0x53, 0x71, 0xfb, 0x12, 0x3e, 0x11, 0x1b,
- 0x37, 0x39, 0xce, 0xdc, 0xfa, 0x43, 0x81, 0x9c, 0x28, 0x01, 0xdd, 0x86, 0xe2, 0xb9, 0x23, 0x56,
- 0xc4, 0x11, 0x6f, 0xb0, 0xf4, 0xf9, 0x26, 0x23, 0x99, 0x4d, 0x8d, 0xe4, 0xe2, 0x99, 0xaf, 0xbc,
- 0xce, 0x33, 0x57, 0x97, 0x9d, 0xf9, 0xd6, 0xcb, 0x2c, 0xa8, 0xbc, 0x3f, 0xff, 0xe3, 0x07, 0x69,
- 0xb1, 0xd7, 0xea, 0xeb, 0xec, 0x75, 0x6e, 0xe9, 0xfd, 0x9a, 0xbd, 0x58, 0xf9, 0xd4, 0x8b, 0x55,
- 0xfd, 0x41, 0x81, 0xd5, 0xe4, 0xbd, 0x41, 0x6f, 0xc2, 0x0d, 0xeb, 0xb0, 0xd9, 0x75, 0x3a, 0x66,
- 0xb7, 0xed, 0xf4, 0xbb, 0xd6, 0xa1, 0xd1, 0x32, 0xf7, 0x4c, 0xa3, 0xad, 0x65, 0xd0, 0x4d, 0x40,
- 0x73, 0x97, 0xd9, 0xb5, 0x0d, 0xdc, 0x6d, 0xee, 0x6b, 0x0a, 0x2a, 0x81, 0x36, 0xb7, 0x5b, 0x06,
- 0x3e, 0x32, 0xb0, 0x96, 0x5d, 0xb4, 0xb6, 0xf6, 0x4d, 0xa3, 0x6b, 0x6b, 0x2b, 0x8b, 0x18, 0x87,
- 0xb8, 0xd7, 0xee, 0xb7, 0x0c, 0xac, 0xa9, 0x8b, 0xf6, 0x56, 0xaf, 0x6b, 0xf5, 0x0f, 0x0c, 0xac,
- 0xe5, 0xaa, 0xbf, 0x2b, 0x90, 0x97, 0x77, 0x00, 0xe9, 0x50, 0x18, 0x93, 0x28, 0x72, 0x87, 0xc9,
- 0x20, 0x27, 0x5b, 0xd4, 0x02, 0x75, 0x10, 0x78, 0xb2, 0xf3, 0xc5, 0x7b, 0x8d, 0x57, 0xb9, 0x51,
- 0xf1, 0xbf, 0x56, 0xe0, 0x11, 0x2c, 0x92, 0xab, 0x5d, 0x80, 0xb9, 0x0d, 0xdd, 0x80, 0xeb, 0x96,
- 0xdd, 0xb4, 0xfb, 0x96, 0xd3, 0xea, 0xb5, 0x0d, 0xde, 0x08, 0xc3, 0xd6, 0x32, 0x08, 0x41, 0x31,
- 0x6d, 0xee, 0x75, 0x34, 0xe5, 0x7c, 0xa8, 0x81, 0x71, 0x0f, 0x6b, 0xd9, 0xcf, 0xd5, 0x55, 0x45,
- 0xcb, 0xde, 0xf9, 0x51, 0x81, 0x35, 0xde, 0xdb, 0x3d, 0xf1, 0xdb, 0x90, 0x34, 0x77, 0x6f, 0xbf,
- 0xf9, 0xa9, 0xe5, 0xb4, 0x7b, 0x4e, 0xb7, 0x67, 0x3b, 0x7d, 0xcb, 0xd0, 0x32, 0xa8, 0x02, 0x6f,
- 0xa5, 0x5c, 0x36, 0x6e, 0xb6, 0x8c, 0x78, 0x7d, 0xd0, 0xb4, 0x3a, 0xda, 0x4b, 0x05, 0xdd, 0x81,
- 0x77, 0x53, 0x11, 0xad, 0x5e, 0xd7, 0x36, 0xbe, 0xb0, 0x9d, 0xcf, 0x9a, 0x96, 0x63, 0x5a, 0x0e,
- 0x36, 0x0e, 0x7a, 0xb6, 0x21, 0x63, 0xbf, 0xcb, 0xa2, 0xf7, 0xe0, 0x9d, 0x0b, 0x62, 0xcf, 0xc7,
- 0xa9, 0xbb, 0xbf, 0x28, 0xcf, 0x4e, 0xcb, 0xca, 0xf3, 0xd3, 0xb2, 0xf2, 0xf7, 0x69, 0x59, 0xf9,
- 0xfe, 0xac, 0x9c, 0x79, 0x7e, 0x56, 0xce, 0xfc, 0x79, 0x56, 0xce, 0xc0, 0xb6, 0x1f, 0x2c, 0x6d,
- 0xe4, 0xae, 0xfc, 0x18, 0x3d, 0xe4, 0xc6, 0x43, 0xe5, 0xab, 0xd6, 0x95, 0xaf, 0x91, 0xfc, 0xe0,
- 0x1d, 0x12, 0x3a, 0xfb, 0xfa, 0xfe, 0x39, 0x7b, 0xab, 0x37, 0x21, 0xd4, 0x9e, 0x41, 0x08, 0x70,
- 0x79, 0x97, 0xeb, 0x47, 0x3b, 0x8f, 0xf2, 0x22, 0xe3, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
- 0xfd, 0xbe, 0x84, 0xc3, 0xc3, 0x0b, 0x00, 0x00,
-}
-
-func (m *TracesData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TracesData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TracesData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceSpans) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DeprecatedScopeSpans) > 0 {
- for iNdEx := len(m.DeprecatedScopeSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DeprecatedScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeSpans) > 0 {
- for iNdEx := len(m.ScopeSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ScopeSpans) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeSpans) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Spans) > 0 {
- for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Span) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Span) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i -= 4
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x85
- }
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x7a
- if m.DroppedLinksCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount))
- i--
- dAtA[i] = 0x70
- }
- if len(m.Links) > 0 {
- for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x6a
- }
- }
- if m.DroppedEventsCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount))
- i--
- dAtA[i] = 0x60
- }
- if len(m.Events) > 0 {
- for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x50
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- }
- if m.EndTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano))
- i--
- dAtA[i] = 0x41
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x39
- }
- if m.Kind != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.Kind))
- i--
- dAtA[i] = 0x30
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x2a
- }
- {
- size := m.ParentSpanId.Size()
- i -= size
- if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- if len(m.TraceState) > 0 {
- i -= len(m.TraceState)
- copy(dAtA[i:], m.TraceState)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState)))
- i--
- dAtA[i] = 0x1a
- }
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Span_Event) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x20
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x12
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x9
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Span_Link) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i -= 4
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
- i--
- dAtA[i] = 0x35
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x28
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.TraceState) > 0 {
- i -= len(m.TraceState)
- copy(dAtA[i:], m.TraceState)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState)))
- i--
- dAtA[i] = 0x1a
- }
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Status) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Status) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Code != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.Code))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Message) > 0 {
- i -= len(m.Message)
- copy(dAtA[i:], m.Message)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.Message)))
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintTrace(dAtA []byte, offset int, v uint64) int {
- offset -= sovTrace(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *TracesData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for _, e := range m.ResourceSpans {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceSpans) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovTrace(uint64(l))
- if len(m.ScopeSpans) > 0 {
- for _, e := range m.ScopeSpans {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if len(m.DeprecatedScopeSpans) > 0 {
- for _, e := range m.DeprecatedScopeSpans {
- l = e.Size()
- n += 2 + l + sovTrace(uint64(l))
- }
- }
- return n
-}
-
-func (m *ScopeSpans) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovTrace(uint64(l))
- if len(m.Spans) > 0 {
- for _, e := range m.Spans {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- return n
-}
-
-func (m *Span) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.TraceId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = len(m.TraceState)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- l = m.ParentSpanId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if m.Kind != 0 {
- n += 1 + sovTrace(uint64(m.Kind))
- }
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.EndTimeUnixNano != 0 {
- n += 9
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
- }
- if len(m.Events) > 0 {
- for _, e := range m.Events {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedEventsCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedEventsCount))
- }
- if len(m.Links) > 0 {
- for _, e := range m.Links {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedLinksCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedLinksCount))
- }
- l = m.Status.Size()
- n += 1 + l + sovTrace(uint64(l))
- if m.Flags != 0 {
- n += 6
- }
- return n
-}
-
-func (m *Span_Event) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TimeUnixNano != 0 {
- n += 9
- }
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
- }
- return n
-}
-
-func (m *Span_Link) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.TraceId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = len(m.TraceState)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
- }
- if m.Flags != 0 {
- n += 5
- }
- return n
-}
-
-func (m *Status) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Message)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if m.Code != 0 {
- n += 1 + sovTrace(uint64(m.Code))
- }
- return n
-}
-
-func sovTrace(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozTrace(x uint64) (n int) {
- return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *TracesData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TracesData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TracesData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceSpans = append(m.ResourceSpans, &ResourceSpans{})
- if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceSpans) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeSpans = append(m.ScopeSpans, &ScopeSpans{})
- if err := m.ScopeSpans[len(m.ScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeprecatedScopeSpans = append(m.DeprecatedScopeSpans, &ScopeSpans{})
- if err := m.DeprecatedScopeSpans[len(m.DeprecatedScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeSpans) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeSpans: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeSpans: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Spans = append(m.Spans, &Span{})
- if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Span) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Span: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TraceState = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
- }
- m.Kind = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Kind |= Span_SpanKind(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 8:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType)
- }
- m.EndTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Events = append(m.Events, &Span_Event{})
- if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType)
- }
- m.DroppedEventsCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedEventsCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 13:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Links = append(m.Links, &Span_Link{})
- if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 14:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType)
- }
- m.DroppedLinksCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedLinksCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 15:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 16:
- if wireType != 5 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- if (iNdEx + 4) > l {
- return io.ErrUnexpectedEOF
- }
- m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Span_Event) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Event: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Span_Link) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Link: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TraceState = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 5 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- if (iNdEx + 4) > l {
- return io.ErrUnexpectedEOF
- }
- m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Status) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Status: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Message = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
- }
- m.Code = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Code |= Status_StatusCode(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipTrace(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthTrace
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupTrace
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTrace
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go
deleted file mode 100644
index 597e071dd..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
- "encoding/hex"
- "errors"
-
- "github.com/gogo/protobuf/proto"
-
- "go.opentelemetry.io/collector/pdata/internal/json"
-)
-
-const spanIDSize = 8
-
-var (
- errMarshalSpanID = errors.New("marshal: invalid buffer length for SpanID")
- errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
-)
-
-// SpanID is a custom data type that is used for all span_id fields in OTLP
-// Protobuf messages.
-type SpanID [spanIDSize]byte
-
-var _ proto.Sizer = (*SpanID)(nil)
-
-// Size returns the size of the data to serialize.
-func (sid SpanID) Size() int {
- if sid.IsEmpty() {
- return 0
- }
- return spanIDSize
-}
-
-// IsEmpty returns true if id contains at least one non-zero byte.
-func (sid SpanID) IsEmpty() bool {
- return sid == [spanIDSize]byte{}
-}
-
-// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization.
-func (sid SpanID) MarshalTo(data []byte) (n int, err error) {
- if sid.IsEmpty() {
- return 0, nil
- }
-
- if len(data) < spanIDSize {
- return 0, errMarshalSpanID
- }
-
- return copy(data, sid[:]), nil
-}
-
-// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization.
-func (sid *SpanID) Unmarshal(data []byte) error {
- if len(data) == 0 {
- *sid = [spanIDSize]byte{}
- return nil
- }
-
- if len(data) != spanIDSize {
- return errUnmarshalSpanID
- }
-
- copy(sid[:], data)
- return nil
-}
-
-// MarshalJSONStream converts SpanID into a hex string.
-func (sid SpanID) MarshalJSONStream(dest *json.Stream) {
- dest.WriteString(hex.EncodeToString(sid[:]))
-}
-
-// UnmarshalJSONIter decodes SpanID from hex string.
-func (sid *SpanID) UnmarshalJSONIter(iter *json.Iterator) {
- *sid = [spanIDSize]byte{}
- unmarshalJSON(sid[:], iter)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go
deleted file mode 100644
index 0e7c98ac9..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
- "encoding/hex"
- "errors"
-
- "github.com/gogo/protobuf/proto"
-
- "go.opentelemetry.io/collector/pdata/internal/json"
-)
-
-const traceIDSize = 16
-
-var (
- errMarshalTraceID = errors.New("marshal: invalid buffer length for TraceID")
- errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
-)
-
-// TraceID is a custom data type that is used for all trace_id fields in OTLP
-// Protobuf messages.
-type TraceID [traceIDSize]byte
-
-var _ proto.Sizer = (*TraceID)(nil)
-
-// Size returns the size of the data to serialize.
-func (tid TraceID) Size() int {
- if tid.IsEmpty() {
- return 0
- }
- return traceIDSize
-}
-
-// IsEmpty returns true if id contains at leas one non-zero byte.
-func (tid TraceID) IsEmpty() bool {
- return tid == [traceIDSize]byte{}
-}
-
-// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization.
-func (tid TraceID) MarshalTo(data []byte) (n int, err error) {
- if tid.IsEmpty() {
- return 0, nil
- }
-
- if len(data) < traceIDSize {
- return 0, errMarshalTraceID
- }
-
- return copy(data, tid[:]), nil
-}
-
-// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization.
-func (tid *TraceID) Unmarshal(data []byte) error {
- if len(data) == 0 {
- *tid = [traceIDSize]byte{}
- return nil
- }
-
- if len(data) != traceIDSize {
- return errUnmarshalTraceID
- }
-
- copy(tid[:], data)
- return nil
-}
-
-// MarshalJSONStream converts TraceID into a hex string.
-func (tid TraceID) MarshalJSONStream(dest *json.Stream) {
- dest.WriteString(hex.EncodeToString(tid[:]))
-}
-
-// UnmarshalJSONIter decodes TraceID from hex string.
-func (tid *TraceID) UnmarshalJSONIter(iter *json.Iterator) {
-	*tid = [traceIDSize]byte{}
- unmarshalJSON(tid[:], iter)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go
new file mode 100644
index 000000000..6c3a7208d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED = AggregationTemporality(0)
+ AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA = AggregationTemporality(1)
+ AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE = AggregationTemporality(2)
+)
+
+// AggregationTemporality defines how a metric aggregator reports aggregated values.
+// It describes how those values relate to the time interval over which they are aggregated.
+type AggregationTemporality int32
+
+var AggregationTemporality_name = map[int32]string{
+ 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
+ 1: "AGGREGATION_TEMPORALITY_DELTA",
+ 2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
+}
+
+var AggregationTemporality_value = map[string]int32{
+ "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
+ "AGGREGATION_TEMPORALITY_DELTA": 1,
+ "AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go
new file mode 100644
index 000000000..4ad9a5b29
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED = SeverityNumber(0)
+ SeverityNumber_SEVERITY_NUMBER_TRACE = SeverityNumber(1)
+ SeverityNumber_SEVERITY_NUMBER_TRACE2 = SeverityNumber(2)
+ SeverityNumber_SEVERITY_NUMBER_TRACE3 = SeverityNumber(3)
+ SeverityNumber_SEVERITY_NUMBER_TRACE4 = SeverityNumber(4)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG = SeverityNumber(5)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG2 = SeverityNumber(6)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG3 = SeverityNumber(7)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG4 = SeverityNumber(8)
+ SeverityNumber_SEVERITY_NUMBER_INFO = SeverityNumber(9)
+ SeverityNumber_SEVERITY_NUMBER_INFO2 = SeverityNumber(10)
+ SeverityNumber_SEVERITY_NUMBER_INFO3 = SeverityNumber(11)
+ SeverityNumber_SEVERITY_NUMBER_INFO4 = SeverityNumber(12)
+ SeverityNumber_SEVERITY_NUMBER_WARN = SeverityNumber(13)
+ SeverityNumber_SEVERITY_NUMBER_WARN2 = SeverityNumber(14)
+ SeverityNumber_SEVERITY_NUMBER_WARN3 = SeverityNumber(15)
+ SeverityNumber_SEVERITY_NUMBER_WARN4 = SeverityNumber(16)
+ SeverityNumber_SEVERITY_NUMBER_ERROR = SeverityNumber(17)
+ SeverityNumber_SEVERITY_NUMBER_ERROR2 = SeverityNumber(18)
+ SeverityNumber_SEVERITY_NUMBER_ERROR3 = SeverityNumber(19)
+ SeverityNumber_SEVERITY_NUMBER_ERROR4 = SeverityNumber(20)
+ SeverityNumber_SEVERITY_NUMBER_FATAL = SeverityNumber(21)
+ SeverityNumber_SEVERITY_NUMBER_FATAL2 = SeverityNumber(22)
+ SeverityNumber_SEVERITY_NUMBER_FATAL3 = SeverityNumber(23)
+ SeverityNumber_SEVERITY_NUMBER_FATAL4 = SeverityNumber(24)
+)
+
+// SeverityNumber represent possible values for LogRecord.SeverityNumber
+type SeverityNumber int32
+
+var SeverityNumber_name = map[int32]string{
+ 0: "SEVERITY_NUMBER_UNSPECIFIED",
+	1:  "SEVERITY_NUMBER_TRACE",
+ 2: "SEVERITY_NUMBER_TRACE2",
+ 3: "SEVERITY_NUMBER_TRACE3",
+ 4: "SEVERITY_NUMBER_TRACE4",
+ 5: "SEVERITY_NUMBER_DEBUG",
+ 6: "SEVERITY_NUMBER_DEBUG2",
+ 7: "SEVERITY_NUMBER_DEBUG3",
+ 8: "SEVERITY_NUMBER_DEBUG4",
+ 9: "SEVERITY_NUMBER_INFO",
+ 10: "SEVERITY_NUMBER_INFO2",
+ 11: "SEVERITY_NUMBER_INFO3",
+ 12: "SEVERITY_NUMBER_INFO4",
+ 13: "SEVERITY_NUMBER_WARN",
+ 14: "SEVERITY_NUMBER_WARN2",
+ 15: "SEVERITY_NUMBER_WARN3",
+ 16: "SEVERITY_NUMBER_WARN4",
+ 17: "SEVERITY_NUMBER_ERROR",
+ 18: "SEVERITY_NUMBER_ERROR2",
+ 19: "SEVERITY_NUMBER_ERROR3",
+ 20: "SEVERITY_NUMBER_ERROR4",
+ 21: "SEVERITY_NUMBER_FATAL",
+ 22: "SEVERITY_NUMBER_FATAL2",
+ 23: "SEVERITY_NUMBER_FATAL3",
+ 24: "SEVERITY_NUMBER_FATAL4",
+}
+
+var SeverityNumber_value = map[string]int32{
+ "SEVERITY_NUMBER_UNSPECIFIED": 0,
+	"SEVERITY_NUMBER_TRACE": 1,
+ "SEVERITY_NUMBER_TRACE2": 2,
+ "SEVERITY_NUMBER_TRACE3": 3,
+ "SEVERITY_NUMBER_TRACE4": 4,
+ "SEVERITY_NUMBER_DEBUG": 5,
+ "SEVERITY_NUMBER_DEBUG2": 6,
+ "SEVERITY_NUMBER_DEBUG3": 7,
+ "SEVERITY_NUMBER_DEBUG4": 8,
+ "SEVERITY_NUMBER_INFO": 9,
+ "SEVERITY_NUMBER_INFO2": 10,
+ "SEVERITY_NUMBER_INFO3": 11,
+ "SEVERITY_NUMBER_INFO4": 12,
+ "SEVERITY_NUMBER_WARN": 13,
+ "SEVERITY_NUMBER_WARN2": 14,
+ "SEVERITY_NUMBER_WARN3": 15,
+ "SEVERITY_NUMBER_WARN4": 16,
+ "SEVERITY_NUMBER_ERROR": 17,
+ "SEVERITY_NUMBER_ERROR2": 18,
+ "SEVERITY_NUMBER_ERROR3": 19,
+ "SEVERITY_NUMBER_ERROR4": 20,
+ "SEVERITY_NUMBER_FATAL": 21,
+ "SEVERITY_NUMBER_FATAL2": 22,
+ "SEVERITY_NUMBER_FATAL3": 23,
+ "SEVERITY_NUMBER_FATAL4": 24,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go
new file mode 100644
index 000000000..1632aadde
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ SpanKind_SPAN_KIND_UNSPECIFIED = SpanKind(0)
+ SpanKind_SPAN_KIND_INTERNAL = SpanKind(1)
+ SpanKind_SPAN_KIND_SERVER = SpanKind(2)
+ SpanKind_SPAN_KIND_CLIENT = SpanKind(3)
+ SpanKind_SPAN_KIND_PRODUCER = SpanKind(4)
+ SpanKind_SPAN_KIND_CONSUMER = SpanKind(5)
+)
+
+// SpanKind is the type of span.
+// Can be used to specify additional relationships between spans in addition to a parent/child relationship.
+type SpanKind int32
+
+var SpanKind_name = map[int32]string{
+ 0: "SPAN_KIND_UNSPECIFIED",
+ 1: "SPAN_KIND_INTERNAL",
+ 2: "SPAN_KIND_SERVER",
+ 3: "SPAN_KIND_CLIENT",
+ 4: "SPAN_KIND_PRODUCER",
+ 5: "SPAN_KIND_CONSUMER",
+}
+
+var SpanKind_value = map[string]int32{
+ "SPAN_KIND_UNSPECIFIED": 0,
+ "SPAN_KIND_INTERNAL": 1,
+ "SPAN_KIND_SERVER": 2,
+ "SPAN_KIND_CLIENT": 3,
+ "SPAN_KIND_PRODUCER": 4,
+ "SPAN_KIND_CONSUMER": 5,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go
new file mode 100644
index 000000000..ac5aab759
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ StatusCode_STATUS_CODE_UNSET = StatusCode(0)
+ StatusCode_STATUS_CODE_OK = StatusCode(1)
+ StatusCode_STATUS_CODE_ERROR = StatusCode(2)
+)
+
+// StatusCode is the status of the span, for the semantics of codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+var StatusCode_name = map[int32]string{
+ 0: "STATUS_CODE_UNSET",
+ 1: "STATUS_CODE_OK",
+ 2: "STATUS_CODE_ERROR",
+}
+
+var StatusCode_value = map[string]int32{
+ "STATUS_CODE_UNSET": 0,
+ "STATUS_CODE_OK": 1,
+ "STATUS_CODE_ERROR": 2,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go
new file mode 100644
index 000000000..ddc86601c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go
@@ -0,0 +1,770 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *AnyValue) GetValue() any {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type AnyValue_StringValue struct {
+ StringValue string
+}
+
+func (m *AnyValue) GetStringValue() string {
+ if v, ok := m.GetValue().(*AnyValue_StringValue); ok {
+ return v.StringValue
+ }
+ return ""
+}
+
+type AnyValue_BoolValue struct {
+ BoolValue bool
+}
+
+func (m *AnyValue) GetBoolValue() bool {
+ if v, ok := m.GetValue().(*AnyValue_BoolValue); ok {
+ return v.BoolValue
+ }
+ return false
+}
+
+type AnyValue_IntValue struct {
+ IntValue int64
+}
+
+func (m *AnyValue) GetIntValue() int64 {
+ if v, ok := m.GetValue().(*AnyValue_IntValue); ok {
+ return v.IntValue
+ }
+ return int64(0)
+}
+
+type AnyValue_DoubleValue struct {
+ DoubleValue float64
+}
+
+func (m *AnyValue) GetDoubleValue() float64 {
+ if v, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
+ return v.DoubleValue
+ }
+ return float64(0)
+}
+
+type AnyValue_ArrayValue struct {
+ ArrayValue *ArrayValue
+}
+
+func (m *AnyValue) GetArrayValue() *ArrayValue {
+ if v, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
+ return v.ArrayValue
+ }
+ return nil
+}
+
+type AnyValue_KvlistValue struct {
+ KvlistValue *KeyValueList
+}
+
+func (m *AnyValue) GetKvlistValue() *KeyValueList {
+ if v, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
+ return v.KvlistValue
+ }
+ return nil
+}
+
+type AnyValue_BytesValue struct {
+ BytesValue []byte
+}
+
+func (m *AnyValue) GetBytesValue() []byte {
+ if v, ok := m.GetValue().(*AnyValue_BytesValue); ok {
+ return v.BytesValue
+ }
+ return nil
+}
+
+type AnyValue struct {
+ Value any
+}
+
+var (
+ protoPoolAnyValue = sync.Pool{
+ New: func() any {
+ return &AnyValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_StringValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_StringValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_BoolValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_BoolValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_IntValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_IntValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_DoubleValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_DoubleValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_ArrayValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_ArrayValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_KvlistValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_KvlistValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_BytesValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_BytesValue{}
+ },
+ }
+)
+
+func NewAnyValue() *AnyValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue{}
+ }
+ return protoPoolAnyValue.Get().(*AnyValue)
+}
+
+func DeleteAnyValue(orig *AnyValue, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ switch ov := orig.Value.(type) {
+ case *AnyValue_StringValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.StringValue = ""
+ ProtoPoolAnyValue_StringValue.Put(ov)
+ }
+ case *AnyValue_BoolValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.BoolValue = false
+ ProtoPoolAnyValue_BoolValue.Put(ov)
+ }
+ case *AnyValue_IntValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.IntValue = int64(0)
+ ProtoPoolAnyValue_IntValue.Put(ov)
+ }
+ case *AnyValue_DoubleValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.DoubleValue = float64(0)
+ ProtoPoolAnyValue_DoubleValue.Put(ov)
+ }
+ case *AnyValue_ArrayValue:
+ DeleteArrayValue(ov.ArrayValue, true)
+ ov.ArrayValue = nil
+ ProtoPoolAnyValue_ArrayValue.Put(ov)
+ case *AnyValue_KvlistValue:
+ DeleteKeyValueList(ov.KvlistValue, true)
+ ov.KvlistValue = nil
+ ProtoPoolAnyValue_KvlistValue.Put(ov)
+ case *AnyValue_BytesValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.BytesValue = nil
+ ProtoPoolAnyValue_BytesValue.Put(ov)
+ }
+
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolAnyValue.Put(orig)
+ }
+}
+
+func CopyAnyValue(dest, src *AnyValue) *AnyValue {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewAnyValue()
+ }
+ switch t := src.Value.(type) {
+ case *AnyValue_StringValue:
+ var ov *AnyValue_StringValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_StringValue{}
+ } else {
+ ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
+ }
+ ov.StringValue = t.StringValue
+ dest.Value = ov
+ case *AnyValue_BoolValue:
+ var ov *AnyValue_BoolValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BoolValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
+ }
+ ov.BoolValue = t.BoolValue
+ dest.Value = ov
+ case *AnyValue_IntValue:
+ var ov *AnyValue_IntValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_IntValue{}
+ } else {
+ ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
+ }
+ ov.IntValue = t.IntValue
+ dest.Value = ov
+ case *AnyValue_DoubleValue:
+ var ov *AnyValue_DoubleValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_DoubleValue{}
+ } else {
+ ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
+ }
+ ov.DoubleValue = t.DoubleValue
+ dest.Value = ov
+ case *AnyValue_ArrayValue:
+ var ov *AnyValue_ArrayValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_ArrayValue{}
+ } else {
+ ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
+ }
+ ov.ArrayValue = NewArrayValue()
+ CopyArrayValue(ov.ArrayValue, t.ArrayValue)
+ dest.Value = ov
+
+ case *AnyValue_KvlistValue:
+ var ov *AnyValue_KvlistValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_KvlistValue{}
+ } else {
+ ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
+ }
+ ov.KvlistValue = NewKeyValueList()
+ CopyKeyValueList(ov.KvlistValue, t.KvlistValue)
+ dest.Value = ov
+
+ case *AnyValue_BytesValue:
+ var ov *AnyValue_BytesValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BytesValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
+ }
+ ov.BytesValue = t.BytesValue
+ dest.Value = ov
+ default:
+ dest.Value = nil
+ }
+
+ return dest
+}
+
+func CopyAnyValueSlice(dest, src []AnyValue) []AnyValue {
+ var newDest []AnyValue
+ if cap(dest) < len(src) {
+ newDest = make([]AnyValue, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteAnyValue(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyAnyValue(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyAnyValuePtrSlice(dest, src []*AnyValue) []*AnyValue {
+ var newDest []*AnyValue
+ if cap(dest) < len(src) {
+ newDest = make([]*AnyValue, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewAnyValue()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteAnyValue(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewAnyValue()
+ }
+ }
+ for i := range src {
+ CopyAnyValue(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *AnyValue) Reset() {
+ *orig = AnyValue{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *AnyValue) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ switch orig := orig.Value.(type) {
+ case *AnyValue_StringValue:
+ dest.WriteObjectField("stringValue")
+ dest.WriteString(orig.StringValue)
+ case *AnyValue_BoolValue:
+ dest.WriteObjectField("boolValue")
+ dest.WriteBool(orig.BoolValue)
+ case *AnyValue_IntValue:
+ dest.WriteObjectField("intValue")
+ dest.WriteInt64(orig.IntValue)
+ case *AnyValue_DoubleValue:
+ dest.WriteObjectField("doubleValue")
+ dest.WriteFloat64(orig.DoubleValue)
+ case *AnyValue_ArrayValue:
+ if orig.ArrayValue != nil {
+ dest.WriteObjectField("arrayValue")
+ orig.ArrayValue.MarshalJSON(dest)
+ }
+ case *AnyValue_KvlistValue:
+ if orig.KvlistValue != nil {
+ dest.WriteObjectField("kvlistValue")
+ orig.KvlistValue.MarshalJSON(dest)
+ }
+ case *AnyValue_BytesValue:
+
+ dest.WriteObjectField("bytesValue")
+ dest.WriteBytes(orig.BytesValue)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *AnyValue) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+
+ case "stringValue", "string_value":
+ {
+ var ov *AnyValue_StringValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_StringValue{}
+ } else {
+ ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
+ }
+ ov.StringValue = iter.ReadString()
+ orig.Value = ov
+ }
+
+ case "boolValue", "bool_value":
+ {
+ var ov *AnyValue_BoolValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BoolValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
+ }
+ ov.BoolValue = iter.ReadBool()
+ orig.Value = ov
+ }
+
+ case "intValue", "int_value":
+ {
+ var ov *AnyValue_IntValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_IntValue{}
+ } else {
+ ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
+ }
+ ov.IntValue = iter.ReadInt64()
+ orig.Value = ov
+ }
+
+ case "doubleValue", "double_value":
+ {
+ var ov *AnyValue_DoubleValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_DoubleValue{}
+ } else {
+ ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
+ }
+ ov.DoubleValue = iter.ReadFloat64()
+ orig.Value = ov
+ }
+
+ case "arrayValue", "array_value":
+ {
+ var ov *AnyValue_ArrayValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_ArrayValue{}
+ } else {
+ ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
+ }
+ ov.ArrayValue = NewArrayValue()
+ ov.ArrayValue.UnmarshalJSON(iter)
+ orig.Value = ov
+ }
+
+ case "kvlistValue", "kvlist_value":
+ {
+ var ov *AnyValue_KvlistValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_KvlistValue{}
+ } else {
+ ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
+ }
+ ov.KvlistValue = NewKeyValueList()
+ ov.KvlistValue.UnmarshalJSON(iter)
+ orig.Value = ov
+ }
+
+ case "bytesValue", "bytes_value":
+ {
+ var ov *AnyValue_BytesValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BytesValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
+ }
+ ov.BytesValue = iter.ReadBytes()
+ orig.Value = ov
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *AnyValue) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ switch orig := orig.Value.(type) {
+ case nil:
+ _ = orig
+ break
+ case *AnyValue_StringValue:
+ l = len(orig.StringValue)
+ n += 1 + proto.Sov(uint64(l)) + l
+ case *AnyValue_BoolValue:
+ n += 2
+ case *AnyValue_IntValue:
+ n += 1 + proto.Sov(uint64(orig.IntValue))
+ case *AnyValue_DoubleValue:
+ n += 9
+ case *AnyValue_ArrayValue:
+ if orig.ArrayValue != nil {
+ l = orig.ArrayValue.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *AnyValue_KvlistValue:
+ if orig.KvlistValue != nil {
+ l = orig.KvlistValue.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *AnyValue_BytesValue:
+ l = len(orig.BytesValue)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *AnyValue) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ switch orig := orig.Value.(type) {
+ case *AnyValue_StringValue:
+ l = len(orig.StringValue)
+ pos -= l
+ copy(buf[pos:], orig.StringValue)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ case *AnyValue_BoolValue:
+ pos--
+ if orig.BoolValue {
+ buf[pos] = 1
+ } else {
+ buf[pos] = 0
+ }
+ pos--
+ buf[pos] = 0x10
+
+ case *AnyValue_IntValue:
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.IntValue))
+ pos--
+ buf[pos] = 0x18
+
+ case *AnyValue_DoubleValue:
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.DoubleValue))
+ pos--
+ buf[pos] = 0x21
+
+ case *AnyValue_ArrayValue:
+ if orig.ArrayValue != nil {
+ l = orig.ArrayValue.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ case *AnyValue_KvlistValue:
+ if orig.KvlistValue != nil {
+ l = orig.KvlistValue.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ case *AnyValue_BytesValue:
+ l = len(orig.BytesValue)
+ pos -= l
+ copy(buf[pos:], orig.BytesValue)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+
+ }
+ return len(buf) - pos
+}
+
+func (orig *AnyValue) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_StringValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_StringValue{}
+ } else {
+ ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
+ }
+ ov.StringValue = string(buf[startPos:pos])
+ orig.Value = ov
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *AnyValue_BoolValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BoolValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
+ }
+ ov.BoolValue = num != 0
+ orig.Value = ov
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *AnyValue_IntValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_IntValue{}
+ } else {
+ ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
+ }
+ ov.IntValue = int64(num)
+ orig.Value = ov
+
+ case 4:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *AnyValue_DoubleValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_DoubleValue{}
+ } else {
+ ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
+ }
+ ov.DoubleValue = math.Float64frombits(num)
+ orig.Value = ov
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_ArrayValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_ArrayValue{}
+ } else {
+ ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
+ }
+ ov.ArrayValue = NewArrayValue()
+ err = ov.ArrayValue.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Value = ov
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_KvlistValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_KvlistValue{}
+ } else {
+ ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
+ }
+ ov.KvlistValue = NewKeyValueList()
+ err = ov.KvlistValue.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Value = ov
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_BytesValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BytesValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
+ }
+ if length != 0 {
+ ov.BytesValue = make([]byte, length)
+ copy(ov.BytesValue, buf[startPos:pos])
+ }
+ orig.Value = ov
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestAnyValue() *AnyValue {
+ orig := NewAnyValue()
+ orig.Value = &AnyValue_StringValue{StringValue: "test_stringvalue"}
+ return orig
+}
+
+func GenTestAnyValuePtrSlice() []*AnyValue {
+ orig := make([]*AnyValue, 5)
+ orig[0] = NewAnyValue()
+ orig[1] = GenTestAnyValue()
+ orig[2] = NewAnyValue()
+ orig[3] = GenTestAnyValue()
+ orig[4] = NewAnyValue()
+ return orig
+}
+
+func GenTestAnyValueSlice() []AnyValue {
+ orig := make([]AnyValue, 5)
+ orig[1] = *GenTestAnyValue()
+ orig[3] = *GenTestAnyValue()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go
new file mode 100644
index 000000000..a4367eac2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+ Values []AnyValue
+}
+
+var (
+ protoPoolArrayValue = sync.Pool{
+ New: func() any {
+ return &ArrayValue{}
+ },
+ }
+)
+
+func NewArrayValue() *ArrayValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &ArrayValue{}
+ }
+ return protoPoolArrayValue.Get().(*ArrayValue)
+}
+
+func DeleteArrayValue(orig *ArrayValue, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Values {
+ DeleteAnyValue(&orig.Values[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolArrayValue.Put(orig)
+ }
+}
+
+func CopyArrayValue(dest, src *ArrayValue) *ArrayValue {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewArrayValue()
+ }
+ dest.Values = CopyAnyValueSlice(dest.Values, src.Values)
+
+ return dest
+}
+
+func CopyArrayValueSlice(dest, src []ArrayValue) []ArrayValue {
+ var newDest []ArrayValue
+ if cap(dest) < len(src) {
+ newDest = make([]ArrayValue, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteArrayValue(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyArrayValue(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyArrayValuePtrSlice(dest, src []*ArrayValue) []*ArrayValue {
+ var newDest []*ArrayValue
+ if cap(dest) < len(src) {
+ newDest = make([]*ArrayValue, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewArrayValue()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteArrayValue(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewArrayValue()
+ }
+ }
+ for i := range src {
+ CopyArrayValue(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ArrayValue) Reset() {
+ *orig = ArrayValue{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ArrayValue) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Values) > 0 {
+ dest.WriteObjectField("values")
+ dest.WriteArrayStart()
+ orig.Values[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Values); i++ {
+ dest.WriteMore()
+ orig.Values[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ArrayValue) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "values":
+ for iter.ReadArray() {
+ orig.Values = append(orig.Values, AnyValue{})
+ orig.Values[len(orig.Values)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ArrayValue) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Values {
+ l = orig.Values[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ArrayValue) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Values) - 1; i >= 0; i-- {
+ l = orig.Values[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ArrayValue) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Values = append(orig.Values, AnyValue{})
+ err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestArrayValue() *ArrayValue {
+ orig := NewArrayValue()
+ orig.Values = []AnyValue{{}, *GenTestAnyValue()}
+ return orig
+}
+
+func GenTestArrayValuePtrSlice() []*ArrayValue {
+ orig := make([]*ArrayValue, 5)
+ orig[0] = NewArrayValue()
+ orig[1] = GenTestArrayValue()
+ orig[2] = NewArrayValue()
+ orig[3] = GenTestArrayValue()
+ orig[4] = NewArrayValue()
+ return orig
+}
+
+func GenTestArrayValueSlice() []ArrayValue {
+ orig := make([]ArrayValue, 5)
+ orig[1] = *GenTestArrayValue()
+ orig[3] = *GenTestArrayValue()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go
new file mode 100644
index 000000000..47c562e68
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go
@@ -0,0 +1,346 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type EntityRef struct {
+ SchemaUrl string
+ Type string
+ IdKeys []string
+ DescriptionKeys []string
+}
+
+var (
+ protoPoolEntityRef = sync.Pool{
+ New: func() any {
+ return &EntityRef{}
+ },
+ }
+)
+
+func NewEntityRef() *EntityRef {
+ if !UseProtoPooling.IsEnabled() {
+ return &EntityRef{}
+ }
+ return protoPoolEntityRef.Get().(*EntityRef)
+}
+
+func DeleteEntityRef(orig *EntityRef, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolEntityRef.Put(orig)
+ }
+}
+
+func CopyEntityRef(dest, src *EntityRef) *EntityRef {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewEntityRef()
+ }
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.Type = src.Type
+
+ dest.IdKeys = append(dest.IdKeys[:0], src.IdKeys...)
+ dest.DescriptionKeys = append(dest.DescriptionKeys[:0], src.DescriptionKeys...)
+
+ return dest
+}
+
+func CopyEntityRefSlice(dest, src []EntityRef) []EntityRef {
+ var newDest []EntityRef
+ if cap(dest) < len(src) {
+ newDest = make([]EntityRef, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteEntityRef(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyEntityRef(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyEntityRefPtrSlice(dest, src []*EntityRef) []*EntityRef {
+ var newDest []*EntityRef
+ if cap(dest) < len(src) {
+ newDest = make([]*EntityRef, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewEntityRef()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteEntityRef(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewEntityRef()
+ }
+ }
+ for i := range src {
+ CopyEntityRef(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *EntityRef) Reset() {
+ *orig = EntityRef{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *EntityRef) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if orig.Type != "" {
+ dest.WriteObjectField("type")
+ dest.WriteString(orig.Type)
+ }
+ if len(orig.IdKeys) > 0 {
+ dest.WriteObjectField("idKeys")
+ dest.WriteArrayStart()
+ dest.WriteString(orig.IdKeys[0])
+ for i := 1; i < len(orig.IdKeys); i++ {
+ dest.WriteMore()
+ dest.WriteString(orig.IdKeys[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.DescriptionKeys) > 0 {
+ dest.WriteObjectField("descriptionKeys")
+ dest.WriteArrayStart()
+ dest.WriteString(orig.DescriptionKeys[0])
+ for i := 1; i < len(orig.DescriptionKeys); i++ {
+ dest.WriteMore()
+ dest.WriteString(orig.DescriptionKeys[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *EntityRef) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "type":
+ orig.Type = iter.ReadString()
+ case "idKeys", "id_keys":
+ for iter.ReadArray() {
+ orig.IdKeys = append(orig.IdKeys, iter.ReadString())
+ }
+
+ case "descriptionKeys", "description_keys":
+ for iter.ReadArray() {
+ orig.DescriptionKeys = append(orig.DescriptionKeys, iter.ReadString())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *EntityRef) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Type)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for _, s := range orig.IdKeys {
+ l = len(s)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for _, s := range orig.DescriptionKeys {
+ l = len(s)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *EntityRef) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Type)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Type)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ for i := len(orig.IdKeys) - 1; i >= 0; i-- {
+ l = len(orig.IdKeys[i])
+ pos -= l
+ copy(buf[pos:], orig.IdKeys[i])
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DescriptionKeys) - 1; i >= 0; i-- {
+ l = len(orig.DescriptionKeys[i])
+ pos -= l
+ copy(buf[pos:], orig.DescriptionKeys[i])
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ return len(buf) - pos
+}
+
+func (orig *EntityRef) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Type = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.IdKeys = append(orig.IdKeys, string(buf[startPos:pos]))
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DescriptionKeys = append(orig.DescriptionKeys, string(buf[startPos:pos]))
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestEntityRef() *EntityRef {
+ orig := NewEntityRef()
+ orig.SchemaUrl = "test_schemaurl"
+ orig.Type = "test_type"
+ orig.IdKeys = []string{"", "test_idkeys"}
+ orig.DescriptionKeys = []string{"", "test_descriptionkeys"}
+ return orig
+}
+
+func GenTestEntityRefPtrSlice() []*EntityRef {
+ orig := make([]*EntityRef, 5)
+ orig[0] = NewEntityRef()
+ orig[1] = GenTestEntityRef()
+ orig[2] = NewEntityRef()
+ orig[3] = GenTestEntityRef()
+ orig[4] = NewEntityRef()
+ return orig
+}
+
+func GenTestEntityRefSlice() []EntityRef {
+ orig := make([]EntityRef, 5)
+ orig[1] = *GenTestEntityRef()
+ orig[3] = *GenTestEntityRef()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplar.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go
similarity index 50%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplar.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go
index 3c3222e6d..29223128d 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplar.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go
@@ -12,41 +12,79 @@ import (
"math"
"sync"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+func (m *Exemplar) GetValue() any {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Exemplar_AsDouble struct {
+ AsDouble float64
+}
+
+func (m *Exemplar) GetAsDouble() float64 {
+ if v, ok := m.GetValue().(*Exemplar_AsDouble); ok {
+ return v.AsDouble
+ }
+ return float64(0)
+}
+
+type Exemplar_AsInt struct {
+ AsInt int64
+}
+
+func (m *Exemplar) GetAsInt() int64 {
+ if v, ok := m.GetValue().(*Exemplar_AsInt); ok {
+ return v.AsInt
+ }
+ return int64(0)
+}
+
+// Exemplar is a sample input double measurement.
+//
+// Exemplars also hold information about the environment when the measurement was recorded,
+// for example the span and trace ID of the active span when the exemplar was recorded.
+type Exemplar struct {
+ FilteredAttributes []KeyValue
+ TimeUnixNano uint64
+ Value any
+ TraceId TraceID
+ SpanId SpanID
+}
+
var (
protoPoolExemplar = sync.Pool{
New: func() any {
- return &otlpmetrics.Exemplar{}
+ return &Exemplar{}
},
}
ProtoPoolExemplar_AsDouble = sync.Pool{
New: func() any {
- return &otlpmetrics.Exemplar_AsDouble{}
+ return &Exemplar_AsDouble{}
},
}
ProtoPoolExemplar_AsInt = sync.Pool{
New: func() any {
- return &otlpmetrics.Exemplar_AsInt{}
+ return &Exemplar_AsInt{}
},
}
)
-func NewOrigExemplar() *otlpmetrics.Exemplar {
+func NewExemplar() *Exemplar {
if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.Exemplar{}
+ return &Exemplar{}
}
- return protoPoolExemplar.Get().(*otlpmetrics.Exemplar)
+ return protoPoolExemplar.Get().(*Exemplar)
}
-func DeleteOrigExemplar(orig *otlpmetrics.Exemplar, nullable bool) {
+func DeleteExemplar(orig *Exemplar, nullable bool) {
if orig == nil {
return
}
@@ -57,23 +95,23 @@ func DeleteOrigExemplar(orig *otlpmetrics.Exemplar, nullable bool) {
}
for i := range orig.FilteredAttributes {
- DeleteOrigKeyValue(&orig.FilteredAttributes[i], false)
+ DeleteKeyValue(&orig.FilteredAttributes[i], false)
}
switch ov := orig.Value.(type) {
- case *otlpmetrics.Exemplar_AsDouble:
+ case *Exemplar_AsDouble:
if UseProtoPooling.IsEnabled() {
ov.AsDouble = float64(0)
ProtoPoolExemplar_AsDouble.Put(ov)
}
- case *otlpmetrics.Exemplar_AsInt:
+ case *Exemplar_AsInt:
if UseProtoPooling.IsEnabled() {
ov.AsInt = int64(0)
ProtoPoolExemplar_AsInt.Put(ov)
}
}
- DeleteOrigSpanID(&orig.SpanId, false)
- DeleteOrigTraceID(&orig.TraceId, false)
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
orig.Reset()
if nullable {
@@ -81,57 +119,114 @@ func DeleteOrigExemplar(orig *otlpmetrics.Exemplar, nullable bool) {
}
}
-func CopyOrigExemplar(dest, src *otlpmetrics.Exemplar) {
+func CopyExemplar(dest, src *Exemplar) *Exemplar {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
}
- dest.FilteredAttributes = CopyOrigKeyValueSlice(dest.FilteredAttributes, src.FilteredAttributes)
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExemplar()
+ }
+ dest.FilteredAttributes = CopyKeyValueSlice(dest.FilteredAttributes, src.FilteredAttributes)
+
dest.TimeUnixNano = src.TimeUnixNano
+
switch t := src.Value.(type) {
- case *otlpmetrics.Exemplar_AsDouble:
- var ov *otlpmetrics.Exemplar_AsDouble
+ case *Exemplar_AsDouble:
+ var ov *Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Exemplar_AsDouble{}
+ ov = &Exemplar_AsDouble{}
} else {
- ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble)
+ ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
}
ov.AsDouble = t.AsDouble
dest.Value = ov
- case *otlpmetrics.Exemplar_AsInt:
- var ov *otlpmetrics.Exemplar_AsInt
+ case *Exemplar_AsInt:
+ var ov *Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Exemplar_AsInt{}
+ ov = &Exemplar_AsInt{}
} else {
- ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt)
+ ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
}
ov.AsInt = t.AsInt
dest.Value = ov
+ default:
+ dest.Value = nil
}
- dest.SpanId = src.SpanId
- dest.TraceId = src.TraceId
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ return dest
}
-func GenTestOrigExemplar() *otlpmetrics.Exemplar {
- orig := NewOrigExemplar()
- orig.FilteredAttributes = GenerateOrigTestKeyValueSlice()
- orig.TimeUnixNano = 1234567890
- orig.Value = &otlpmetrics.Exemplar_AsInt{AsInt: int64(13)}
- orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
- orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
- return orig
+func CopyExemplarSlice(dest, src []Exemplar) []Exemplar {
+ var newDest []Exemplar
+ if cap(dest) < len(src) {
+ newDest = make([]Exemplar, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExemplar(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExemplar(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExemplarPtrSlice(dest, src []*Exemplar) []*Exemplar {
+ var newDest []*Exemplar
+ if cap(dest) < len(src) {
+ newDest = make([]*Exemplar, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExemplar()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExemplar(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExemplar()
+ }
+ }
+ for i := range src {
+ CopyExemplar(newDest[i], src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExemplar(orig *otlpmetrics.Exemplar, dest *json.Stream) {
+func (orig *Exemplar) Reset() {
+ *orig = Exemplar{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Exemplar) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.FilteredAttributes) > 0 {
dest.WriteObjectField("filteredAttributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.FilteredAttributes[0], dest)
+ orig.FilteredAttributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.FilteredAttributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.FilteredAttributes[i], dest)
+ orig.FilteredAttributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -140,32 +235,32 @@ func MarshalJSONOrigExemplar(orig *otlpmetrics.Exemplar, dest *json.Stream) {
dest.WriteUint64(orig.TimeUnixNano)
}
switch orig := orig.Value.(type) {
- case *otlpmetrics.Exemplar_AsDouble:
+ case *Exemplar_AsDouble:
dest.WriteObjectField("asDouble")
dest.WriteFloat64(orig.AsDouble)
- case *otlpmetrics.Exemplar_AsInt:
+ case *Exemplar_AsInt:
dest.WriteObjectField("asInt")
dest.WriteInt64(orig.AsInt)
}
- if orig.SpanId != data.SpanID([8]byte{}) {
- dest.WriteObjectField("spanId")
- MarshalJSONOrigSpanID(&orig.SpanId, dest)
- }
- if orig.TraceId != data.TraceID([16]byte{}) {
+ if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
- MarshalJSONOrigTraceID(&orig.TraceId, dest)
+ orig.TraceId.MarshalJSON(dest)
+ }
+ if !orig.SpanId.IsEmpty() {
+ dest.WriteObjectField("spanId")
+ orig.SpanId.MarshalJSON(dest)
}
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigExemplar unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExemplar(orig *otlpmetrics.Exemplar, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Exemplar) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "filteredAttributes", "filtered_attributes":
for iter.ReadArray() {
- orig.FilteredAttributes = append(orig.FilteredAttributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.FilteredAttributes[len(orig.FilteredAttributes)-1], iter)
+ orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{})
+ orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalJSON(iter)
}
case "timeUnixNano", "time_unix_nano":
@@ -173,11 +268,11 @@ func UnmarshalJSONOrigExemplar(orig *otlpmetrics.Exemplar, iter *json.Iterator)
case "asDouble", "as_double":
{
- var ov *otlpmetrics.Exemplar_AsDouble
+ var ov *Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Exemplar_AsDouble{}
+ ov = &Exemplar_AsDouble{}
} else {
- ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble)
+ ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
}
ov.AsDouble = iter.ReadFloat64()
orig.Value = ov
@@ -185,32 +280,34 @@ func UnmarshalJSONOrigExemplar(orig *otlpmetrics.Exemplar, iter *json.Iterator)
case "asInt", "as_int":
{
- var ov *otlpmetrics.Exemplar_AsInt
+ var ov *Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Exemplar_AsInt{}
+ ov = &Exemplar_AsInt{}
} else {
- ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt)
+ ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
}
ov.AsInt = iter.ReadInt64()
orig.Value = ov
}
- case "spanId", "span_id":
- UnmarshalJSONOrigSpanID(&orig.SpanId, iter)
case "traceId", "trace_id":
- UnmarshalJSONOrigTraceID(&orig.TraceId, iter)
+
+ orig.TraceId.UnmarshalJSON(iter)
+ case "spanId", "span_id":
+
+ orig.SpanId.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
-func SizeProtoOrigExemplar(orig *otlpmetrics.Exemplar) int {
+func (orig *Exemplar) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.FilteredAttributes {
- l = SizeProtoOrigKeyValue(&orig.FilteredAttributes[i])
+ l = orig.FilteredAttributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.TimeUnixNano != 0 {
@@ -220,24 +317,24 @@ func SizeProtoOrigExemplar(orig *otlpmetrics.Exemplar) int {
case nil:
_ = orig
break
- case *otlpmetrics.Exemplar_AsDouble:
+ case *Exemplar_AsDouble:
n += 9
- case *otlpmetrics.Exemplar_AsInt:
+ case *Exemplar_AsInt:
n += 9
}
- l = SizeProtoOrigSpanID(&orig.SpanId)
+ l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
- l = SizeProtoOrigTraceID(&orig.TraceId)
+ l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
-func MarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) int {
+func (orig *Exemplar) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.FilteredAttributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.FilteredAttributes[i], buf[:pos])
+ l = orig.FilteredAttributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -250,36 +347,35 @@ func MarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) int {
buf[pos] = 0x11
}
switch orig := orig.Value.(type) {
- case *otlpmetrics.Exemplar_AsDouble:
+ case *Exemplar_AsDouble:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
pos--
buf[pos] = 0x19
- case *otlpmetrics.Exemplar_AsInt:
+ case *Exemplar_AsInt:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
pos--
buf[pos] = 0x31
}
-
- l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos])
+ l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
- buf[pos] = 0x22
+ buf[pos] = 0x2a
- l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos])
+ l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
- buf[pos] = 0x2a
+ buf[pos] = 0x22
return len(buf) - pos
}
-func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
+func (orig *Exemplar) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -304,8 +400,8 @@ func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
return err
}
startPos := pos - length
- orig.FilteredAttributes = append(orig.FilteredAttributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.FilteredAttributes[len(orig.FilteredAttributes)-1], buf[startPos:pos])
+ orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{})
+ err = orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -331,11 +427,11 @@ func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
if err != nil {
return err
}
- var ov *otlpmetrics.Exemplar_AsDouble
+ var ov *Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Exemplar_AsDouble{}
+ ov = &Exemplar_AsDouble{}
} else {
- ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble)
+ ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
}
ov.AsDouble = math.Float64frombits(num)
orig.Value = ov
@@ -349,18 +445,18 @@ func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
if err != nil {
return err
}
- var ov *otlpmetrics.Exemplar_AsInt
+ var ov *Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Exemplar_AsInt{}
+ ov = &Exemplar_AsInt{}
} else {
- ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt)
+ ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
}
ov.AsInt = int64(num)
orig.Value = ov
- case 4:
+ case 5:
if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
@@ -369,14 +465,14 @@ func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos])
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
- case 5:
+ case 4:
if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
@@ -385,7 +481,7 @@ func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos])
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -398,3 +494,30 @@ func UnmarshalProtoOrigExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
}
return nil
}
+
+func GenTestExemplar() *Exemplar {
+ orig := NewExemplar()
+ orig.FilteredAttributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.TimeUnixNano = uint64(13)
+ orig.Value = &Exemplar_AsDouble{AsDouble: float64(3.1415926)}
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ return orig
+}
+
+func GenTestExemplarPtrSlice() []*Exemplar {
+ orig := make([]*Exemplar, 5)
+ orig[0] = NewExemplar()
+ orig[1] = GenTestExemplar()
+ orig[2] = NewExemplar()
+ orig[3] = GenTestExemplar()
+ orig[4] = NewExemplar()
+ return orig
+}
+
+func GenTestExemplarSlice() []Exemplar {
+ orig := make([]Exemplar, 5)
+ orig[1] = *GenTestExemplar()
+ orig[3] = *GenTestExemplar()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go
new file mode 100644
index 000000000..926816346
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go
@@ -0,0 +1,277 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExponentialHistogram represents the type of a metric that is calculated by aggregating
+// as an ExponentialHistogram of all reported double measurements over a time interval.
+type ExponentialHistogram struct {
+ DataPoints []*ExponentialHistogramDataPoint
+ AggregationTemporality AggregationTemporality
+}
+
+var (
+ protoPoolExponentialHistogram = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogram{}
+ },
+ }
+)
+
+func NewExponentialHistogram() *ExponentialHistogram {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExponentialHistogram{}
+ }
+ return protoPoolExponentialHistogram.Get().(*ExponentialHistogram)
+}
+
+func DeleteExponentialHistogram(orig *ExponentialHistogram, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteExponentialHistogramDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExponentialHistogram.Put(orig)
+ }
+}
+
+func CopyExponentialHistogram(dest, src *ExponentialHistogram) *ExponentialHistogram {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExponentialHistogram()
+ }
+ dest.DataPoints = CopyExponentialHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ dest.AggregationTemporality = src.AggregationTemporality
+
+ return dest
+}
+
+func CopyExponentialHistogramSlice(dest, src []ExponentialHistogram) []ExponentialHistogram {
+ var newDest []ExponentialHistogram
+ if cap(dest) < len(src) {
+ newDest = make([]ExponentialHistogram, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogram(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogram(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExponentialHistogramPtrSlice(dest, src []*ExponentialHistogram) []*ExponentialHistogram {
+ var newDest []*ExponentialHistogram
+ if cap(dest) < len(src) {
+ newDest = make([]*ExponentialHistogram, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogram()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogram(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogram()
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogram(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExponentialHistogram) Reset() {
+ *orig = ExponentialHistogram{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExponentialHistogram) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+
+ if int32(orig.AggregationTemporality) != 0 {
+ dest.WriteObjectField("aggregationTemporality")
+ dest.WriteInt32(int32(orig.AggregationTemporality))
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExponentialHistogram) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ case "aggregationTemporality", "aggregation_temporality":
+ orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExponentialHistogram) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.AggregationTemporality != 0 {
+ n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
+ }
+ return n
+}
+
+func (orig *ExponentialHistogram) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.AggregationTemporality != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
+ pos--
+ buf[pos] = 0x10
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExponentialHistogram) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.AggregationTemporality = AggregationTemporality(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExponentialHistogram() *ExponentialHistogram {
+ orig := NewExponentialHistogram()
+ orig.DataPoints = []*ExponentialHistogramDataPoint{{}, GenTestExponentialHistogramDataPoint()}
+ orig.AggregationTemporality = AggregationTemporality(13)
+ return orig
+}
+
+func GenTestExponentialHistogramPtrSlice() []*ExponentialHistogram {
+ orig := make([]*ExponentialHistogram, 5)
+ orig[0] = NewExponentialHistogram()
+ orig[1] = GenTestExponentialHistogram()
+ orig[2] = NewExponentialHistogram()
+ orig[3] = GenTestExponentialHistogram()
+ orig[4] = NewExponentialHistogram()
+ return orig
+}
+
+func GenTestExponentialHistogramSlice() []ExponentialHistogram {
+ orig := make([]ExponentialHistogram, 5)
+ orig[1] = *GenTestExponentialHistogram()
+ orig[3] = *GenTestExponentialHistogram()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go
similarity index 52%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go
index 29bc0011c..d16c0120e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go
@@ -12,45 +12,118 @@ import (
"math"
"sync"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+func (m *ExponentialHistogramDataPoint) GetSum_() any {
+ if m != nil {
+ return m.Sum_
+ }
+ return nil
+}
+
+type ExponentialHistogramDataPoint_Sum struct {
+ Sum float64
+}
+
+func (m *ExponentialHistogramDataPoint) GetSum() float64 {
+ if v, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok {
+ return v.Sum
+ }
+ return float64(0)
+}
+
+func (m *ExponentialHistogramDataPoint) GetMin_() any {
+ if m != nil {
+ return m.Min_
+ }
+ return nil
+}
+
+type ExponentialHistogramDataPoint_Min struct {
+ Min float64
+}
+
+func (m *ExponentialHistogramDataPoint) GetMin() float64 {
+ if v, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok {
+ return v.Min
+ }
+ return float64(0)
+}
+
+func (m *ExponentialHistogramDataPoint) GetMax_() any {
+ if m != nil {
+ return m.Max_
+ }
+ return nil
+}
+
+type ExponentialHistogramDataPoint_Max struct {
+ Max float64
+}
+
+func (m *ExponentialHistogramDataPoint) GetMax() float64 {
+ if v, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok {
+ return v.Max
+ }
+ return float64(0)
+}
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
+// summary statistics for a population of values, it may optionally contain the
+// distribution of those values across a set of buckets.
+type ExponentialHistogramDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Count uint64
+ Sum_ any
+ Scale int32
+ ZeroCount uint64
+ Positive ExponentialHistogramDataPointBuckets
+ Negative ExponentialHistogramDataPointBuckets
+ Flags uint32
+ Exemplars []Exemplar
+ Min_ any
+ Max_ any
+ ZeroThreshold float64
+}
+
var (
protoPoolExponentialHistogramDataPoint = sync.Pool{
New: func() any {
- return &otlpmetrics.ExponentialHistogramDataPoint{}
+ return &ExponentialHistogramDataPoint{}
},
}
ProtoPoolExponentialHistogramDataPoint_Sum = sync.Pool{
New: func() any {
- return &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
+ return &ExponentialHistogramDataPoint_Sum{}
},
}
ProtoPoolExponentialHistogramDataPoint_Min = sync.Pool{
New: func() any {
- return &otlpmetrics.ExponentialHistogramDataPoint_Min{}
+ return &ExponentialHistogramDataPoint_Min{}
},
}
ProtoPoolExponentialHistogramDataPoint_Max = sync.Pool{
New: func() any {
- return &otlpmetrics.ExponentialHistogramDataPoint_Max{}
+ return &ExponentialHistogramDataPoint_Max{}
},
}
)
-func NewOrigExponentialHistogramDataPoint() *otlpmetrics.ExponentialHistogramDataPoint {
+func NewExponentialHistogramDataPoint() *ExponentialHistogramDataPoint {
if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.ExponentialHistogramDataPoint{}
+ return &ExponentialHistogramDataPoint{}
}
- return protoPoolExponentialHistogramDataPoint.Get().(*otlpmetrics.ExponentialHistogramDataPoint)
+ return protoPoolExponentialHistogramDataPoint.Get().(*ExponentialHistogramDataPoint)
}
-func DeleteOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, nullable bool) {
+func DeleteExponentialHistogramDataPoint(orig *ExponentialHistogramDataPoint, nullable bool) {
if orig == nil {
return
}
@@ -61,23 +134,23 @@ func DeleteOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistog
}
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Sum_.(type) {
- case *otlpmetrics.ExponentialHistogramDataPoint_Sum:
+ case *ExponentialHistogramDataPoint_Sum:
if UseProtoPooling.IsEnabled() {
ov.Sum = float64(0)
ProtoPoolExponentialHistogramDataPoint_Sum.Put(ov)
}
}
- DeleteOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, false)
- DeleteOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, false)
+ DeleteExponentialHistogramDataPointBuckets(&orig.Positive, false)
+ DeleteExponentialHistogramDataPointBuckets(&orig.Negative, false)
for i := range orig.Exemplars {
- DeleteOrigExemplar(&orig.Exemplars[i], false)
+ DeleteExemplar(&orig.Exemplars[i], false)
}
switch ov := orig.Min_.(type) {
- case *otlpmetrics.ExponentialHistogramDataPoint_Min:
+ case *ExponentialHistogramDataPoint_Min:
if UseProtoPooling.IsEnabled() {
ov.Min = float64(0)
ProtoPoolExponentialHistogramDataPoint_Min.Put(ov)
@@ -85,7 +158,7 @@ func DeleteOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistog
}
switch ov := orig.Max_.(type) {
- case *otlpmetrics.ExponentialHistogramDataPoint_Max:
+ case *ExponentialHistogramDataPoint_Max:
if UseProtoPooling.IsEnabled() {
ov.Max = float64(0)
ProtoPoolExponentialHistogramDataPoint_Max.Put(ov)
@@ -99,83 +172,148 @@ func DeleteOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistog
}
}
-func CopyOrigExponentialHistogramDataPoint(dest, src *otlpmetrics.ExponentialHistogramDataPoint) {
+func CopyExponentialHistogramDataPoint(dest, src *ExponentialHistogramDataPoint) *ExponentialHistogramDataPoint {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
}
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExponentialHistogramDataPoint()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.StartTimeUnixNano = src.StartTimeUnixNano
+
dest.TimeUnixNano = src.TimeUnixNano
+
dest.Count = src.Count
- if srcSum, ok := src.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
- destSum, ok := dest.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum)
- if !ok {
- destSum = &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
- dest.Sum_ = destSum
+
+ switch t := src.Sum_.(type) {
+ case *ExponentialHistogramDataPoint_Sum:
+ var ov *ExponentialHistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
}
- destSum.Sum = srcSum.Sum
- } else {
+ ov.Sum = t.Sum
+ dest.Sum_ = ov
+ default:
dest.Sum_ = nil
}
+
dest.Scale = src.Scale
+
dest.ZeroCount = src.ZeroCount
- CopyOrigExponentialHistogramDataPoint_Buckets(&dest.Positive, &src.Positive)
- CopyOrigExponentialHistogramDataPoint_Buckets(&dest.Negative, &src.Negative)
+
+ CopyExponentialHistogramDataPointBuckets(&dest.Positive, &src.Positive)
+
+ CopyExponentialHistogramDataPointBuckets(&dest.Negative, &src.Negative)
+
dest.Flags = src.Flags
- dest.Exemplars = CopyOrigExemplarSlice(dest.Exemplars, src.Exemplars)
- if srcMin, ok := src.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
- destMin, ok := dest.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min)
- if !ok {
- destMin = &otlpmetrics.ExponentialHistogramDataPoint_Min{}
- dest.Min_ = destMin
+
+ dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
+
+ switch t := src.Min_.(type) {
+ case *ExponentialHistogramDataPoint_Min:
+ var ov *ExponentialHistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
}
- destMin.Min = srcMin.Min
- } else {
+ ov.Min = t.Min
+ dest.Min_ = ov
+ default:
dest.Min_ = nil
}
- if srcMax, ok := src.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
- destMax, ok := dest.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max)
- if !ok {
- destMax = &otlpmetrics.ExponentialHistogramDataPoint_Max{}
- dest.Max_ = destMax
+
+ switch t := src.Max_.(type) {
+ case *ExponentialHistogramDataPoint_Max:
+ var ov *ExponentialHistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
}
- destMax.Max = srcMax.Max
- } else {
+ ov.Max = t.Max
+ dest.Max_ = ov
+ default:
dest.Max_ = nil
}
+
dest.ZeroThreshold = src.ZeroThreshold
+
+ return dest
}
-func GenTestOrigExponentialHistogramDataPoint() *otlpmetrics.ExponentialHistogramDataPoint {
- orig := NewOrigExponentialHistogramDataPoint()
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.StartTimeUnixNano = 1234567890
- orig.TimeUnixNano = 1234567890
- orig.Count = uint64(13)
- orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: float64(3.1415926)}
- orig.Scale = int32(13)
- orig.ZeroCount = uint64(13)
- orig.Positive = *GenTestOrigExponentialHistogramDataPoint_Buckets()
- orig.Negative = *GenTestOrigExponentialHistogramDataPoint_Buckets()
- orig.Flags = 1
- orig.Exemplars = GenerateOrigTestExemplarSlice()
- orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: float64(3.1415926)}
- orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: float64(3.1415926)}
- orig.ZeroThreshold = float64(3.1415926)
- return orig
+func CopyExponentialHistogramDataPointSlice(dest, src []ExponentialHistogramDataPoint) []ExponentialHistogramDataPoint {
+ var newDest []ExponentialHistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]ExponentialHistogramDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, dest *json.Stream) {
+func CopyExponentialHistogramDataPointPtrSlice(dest, src []*ExponentialHistogramDataPoint) []*ExponentialHistogramDataPoint {
+ var newDest []*ExponentialHistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*ExponentialHistogramDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPoint()
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPoint(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExponentialHistogramDataPoint) Reset() {
+ *orig = ExponentialHistogramDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExponentialHistogramDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -191,7 +329,7 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
- if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
+ if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
@@ -204,9 +342,9 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH
dest.WriteUint64(orig.ZeroCount)
}
dest.WriteObjectField("positive")
- MarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, dest)
+ orig.Positive.MarshalJSON(dest)
dest.WriteObjectField("negative")
- MarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, dest)
+ orig.Negative.MarshalJSON(dest)
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
@@ -214,18 +352,18 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
- MarshalJSONOrigExemplar(&orig.Exemplars[0], dest)
+ orig.Exemplars[0].MarshalJSON(dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
- MarshalJSONOrigExemplar(&orig.Exemplars[i], dest)
+ orig.Exemplars[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
- if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
+ if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
dest.WriteObjectField("min")
dest.WriteFloat64(orig.Min)
}
- if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
+ if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
dest.WriteObjectField("max")
dest.WriteFloat64(orig.Max)
}
@@ -236,14 +374,14 @@ func MarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialH
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigExponentialHistogramDataPoint unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExponentialHistogramDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
@@ -254,11 +392,11 @@ func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponentia
orig.Count = iter.ReadUint64()
case "sum":
{
- var ov *otlpmetrics.ExponentialHistogramDataPoint_Sum
+ var ov *ExponentialHistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
+ ov = &ExponentialHistogramDataPoint_Sum{}
} else {
- ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Sum)
+ ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
}
ov.Sum = iter.ReadFloat64()
orig.Sum_ = ov
@@ -269,24 +407,26 @@ func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponentia
case "zeroCount", "zero_count":
orig.ZeroCount = iter.ReadUint64()
case "positive":
- UnmarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, iter)
+
+ orig.Positive.UnmarshalJSON(iter)
case "negative":
- UnmarshalJSONOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, iter)
+
+ orig.Negative.UnmarshalJSON(iter)
case "flags":
orig.Flags = iter.ReadUint32()
case "exemplars":
for iter.ReadArray() {
- orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
- UnmarshalJSONOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter)
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
}
case "min":
{
- var ov *otlpmetrics.ExponentialHistogramDataPoint_Min
+ var ov *ExponentialHistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.ExponentialHistogramDataPoint_Min{}
+ ov = &ExponentialHistogramDataPoint_Min{}
} else {
- ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Min)
+ ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
}
ov.Min = iter.ReadFloat64()
orig.Min_ = ov
@@ -294,11 +434,11 @@ func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponentia
case "max":
{
- var ov *otlpmetrics.ExponentialHistogramDataPoint_Max
+ var ov *ExponentialHistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.ExponentialHistogramDataPoint_Max{}
+ ov = &ExponentialHistogramDataPoint_Max{}
} else {
- ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Max)
+ ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
}
ov.Max = iter.ReadFloat64()
orig.Max_ = ov
@@ -312,12 +452,12 @@ func UnmarshalJSONOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponentia
}
}
-func SizeProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint) int {
+func (orig *ExponentialHistogramDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
@@ -329,7 +469,7 @@ func SizeProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHis
if orig.Count != 0 {
n += 9
}
- if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
+ if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
_ = orig
n += 9
}
@@ -339,22 +479,22 @@ func SizeProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHis
if orig.ZeroCount != 0 {
n += 9
}
- l = SizeProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Positive)
+ l = orig.Positive.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
- l = SizeProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Negative)
+ l = orig.Negative.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
for i := range orig.Exemplars {
- l = SizeProtoOrigExemplar(&orig.Exemplars[i])
+ l = orig.Exemplars[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
- if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
+ if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
_ = orig
n += 9
}
- if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
+ if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
_ = orig
n += 9
}
@@ -364,12 +504,12 @@ func SizeProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHis
return n
}
-func MarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, buf []byte) int {
+func (orig *ExponentialHistogramDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -393,7 +533,7 @@ func MarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponential
pos--
buf[pos] = 0x21
}
- if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
+ if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
@@ -410,14 +550,13 @@ func MarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponential
pos--
buf[pos] = 0x39
}
-
- l = MarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, buf[:pos])
+ l = orig.Positive.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x42
- l = MarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, buf[:pos])
+ l = orig.Negative.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -429,19 +568,19 @@ func MarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponential
buf[pos] = 0x50
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
- l = MarshalProtoOrigExemplar(&orig.Exemplars[i], buf[:pos])
+ l = orig.Exemplars[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
- if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
+ if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
pos--
buf[pos] = 0x61
}
- if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
+ if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
pos--
@@ -456,7 +595,7 @@ func MarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponential
return len(buf) - pos
}
-func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, buf []byte) error {
+func (orig *ExponentialHistogramDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -481,8 +620,8 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -532,11 +671,11 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
if err != nil {
return err
}
- var ov *otlpmetrics.ExponentialHistogramDataPoint_Sum
+ var ov *ExponentialHistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
+ ov = &ExponentialHistogramDataPoint_Sum{}
} else {
- ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Sum)
+ ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
}
ov.Sum = math.Float64frombits(num)
orig.Sum_ = ov
@@ -576,7 +715,7 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
}
startPos := pos - length
- err = UnmarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Positive, buf[startPos:pos])
+ err = orig.Positive.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -592,7 +731,7 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
}
startPos := pos - length
- err = UnmarshalProtoOrigExponentialHistogramDataPoint_Buckets(&orig.Negative, buf[startPos:pos])
+ err = orig.Negative.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -619,8 +758,8 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
return err
}
startPos := pos - length
- orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
- err = UnmarshalProtoOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos])
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -634,11 +773,11 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
if err != nil {
return err
}
- var ov *otlpmetrics.ExponentialHistogramDataPoint_Min
+ var ov *ExponentialHistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.ExponentialHistogramDataPoint_Min{}
+ ov = &ExponentialHistogramDataPoint_Min{}
} else {
- ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Min)
+ ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
}
ov.Min = math.Float64frombits(num)
orig.Min_ = ov
@@ -652,11 +791,11 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
if err != nil {
return err
}
- var ov *otlpmetrics.ExponentialHistogramDataPoint_Max
+ var ov *ExponentialHistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.ExponentialHistogramDataPoint_Max{}
+ ov = &ExponentialHistogramDataPoint_Max{}
} else {
- ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Max)
+ ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
}
ov.Max = math.Float64frombits(num)
orig.Max_ = ov
@@ -681,3 +820,39 @@ func UnmarshalProtoOrigExponentialHistogramDataPoint(orig *otlpmetrics.Exponenti
}
return nil
}
+
+func GenTestExponentialHistogramDataPoint() *ExponentialHistogramDataPoint {
+ orig := NewExponentialHistogramDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Count = uint64(13)
+ orig.Sum_ = &ExponentialHistogramDataPoint_Sum{Sum: float64(3.1415926)}
+ orig.Scale = int32(13)
+ orig.ZeroCount = uint64(13)
+ orig.Positive = *GenTestExponentialHistogramDataPointBuckets()
+ orig.Negative = *GenTestExponentialHistogramDataPointBuckets()
+ orig.Flags = uint32(13)
+ orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
+ orig.Min_ = &ExponentialHistogramDataPoint_Min{Min: float64(3.1415926)}
+ orig.Max_ = &ExponentialHistogramDataPoint_Max{Max: float64(3.1415926)}
+ orig.ZeroThreshold = float64(3.1415926)
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointPtrSlice() []*ExponentialHistogramDataPoint {
+ orig := make([]*ExponentialHistogramDataPoint, 5)
+ orig[0] = NewExponentialHistogramDataPoint()
+ orig[1] = GenTestExponentialHistogramDataPoint()
+ orig[2] = NewExponentialHistogramDataPoint()
+ orig[3] = GenTestExponentialHistogramDataPoint()
+ orig[4] = NewExponentialHistogramDataPoint()
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointSlice() []ExponentialHistogramDataPoint {
+ orig := make([]ExponentialHistogramDataPoint, 5)
+ orig[1] = *GenTestExponentialHistogramDataPoint()
+ orig[3] = *GenTestExponentialHistogramDataPoint()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go
new file mode 100644
index 000000000..459dfe0c3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go
@@ -0,0 +1,290 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExponentialHistogramDataPointBuckets are a set of bucket counts, encoded in a contiguous array of counts.
+type ExponentialHistogramDataPointBuckets struct {
+ Offset int32
+ BucketCounts []uint64
+}
+
+var (
+ protoPoolExponentialHistogramDataPointBuckets = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogramDataPointBuckets{}
+ },
+ }
+)
+
+func NewExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExponentialHistogramDataPointBuckets{}
+ }
+ return protoPoolExponentialHistogramDataPointBuckets.Get().(*ExponentialHistogramDataPointBuckets)
+}
+
+func DeleteExponentialHistogramDataPointBuckets(orig *ExponentialHistogramDataPointBuckets, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExponentialHistogramDataPointBuckets.Put(orig)
+ }
+}
+
+func CopyExponentialHistogramDataPointBuckets(dest, src *ExponentialHistogramDataPointBuckets) *ExponentialHistogramDataPointBuckets {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExponentialHistogramDataPointBuckets()
+ }
+ dest.Offset = src.Offset
+
+ dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...)
+
+ return dest
+}
+
+func CopyExponentialHistogramDataPointBucketsSlice(dest, src []ExponentialHistogramDataPointBuckets) []ExponentialHistogramDataPointBuckets {
+ var newDest []ExponentialHistogramDataPointBuckets
+ if cap(dest) < len(src) {
+ newDest = make([]ExponentialHistogramDataPointBuckets, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPointBuckets(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPointBuckets(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExponentialHistogramDataPointBucketsPtrSlice(dest, src []*ExponentialHistogramDataPointBuckets) []*ExponentialHistogramDataPointBuckets {
+ var newDest []*ExponentialHistogramDataPointBuckets
+ if cap(dest) < len(src) {
+ newDest = make([]*ExponentialHistogramDataPointBuckets, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPointBuckets()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPointBuckets(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPointBuckets()
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPointBuckets(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) Reset() {
+ *orig = ExponentialHistogramDataPointBuckets{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExponentialHistogramDataPointBuckets) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Offset != int32(0) {
+ dest.WriteObjectField("offset")
+ dest.WriteInt32(orig.Offset)
+ }
+ if len(orig.BucketCounts) > 0 {
+ dest.WriteObjectField("bucketCounts")
+ dest.WriteArrayStart()
+ dest.WriteUint64(orig.BucketCounts[0])
+ for i := 1; i < len(orig.BucketCounts); i++ {
+ dest.WriteMore()
+ dest.WriteUint64(orig.BucketCounts[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExponentialHistogramDataPointBuckets) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "offset":
+ orig.Offset = iter.ReadInt32()
+ case "bucketCounts", "bucket_counts":
+ for iter.ReadArray() {
+ orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.Offset != 0 {
+ n += 1 + proto.Soz(uint64(orig.Offset))
+ }
+ if len(orig.BucketCounts) > 0 {
+ l = 0
+ for _, e := range orig.BucketCounts {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.Offset != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Offset)<<1)^uint32(orig.Offset>>31)))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.BucketCounts)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.BucketCounts[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Offset = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
+ case 2:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts = append(orig.BucketCounts, uint64(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts = append(orig.BucketCounts, uint64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets {
+ orig := NewExponentialHistogramDataPointBuckets()
+ orig.Offset = int32(13)
+ orig.BucketCounts = []uint64{uint64(0), uint64(13)}
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointBucketsPtrSlice() []*ExponentialHistogramDataPointBuckets {
+ orig := make([]*ExponentialHistogramDataPointBuckets, 5)
+ orig[0] = NewExponentialHistogramDataPointBuckets()
+ orig[1] = GenTestExponentialHistogramDataPointBuckets()
+ orig[2] = NewExponentialHistogramDataPointBuckets()
+ orig[3] = GenTestExponentialHistogramDataPointBuckets()
+ orig[4] = NewExponentialHistogramDataPointBuckets()
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointBucketsSlice() []ExponentialHistogramDataPointBuckets {
+ orig := make([]ExponentialHistogramDataPointBuckets, 5)
+ orig[1] = *GenTestExponentialHistogramDataPointBuckets()
+ orig[3] = *GenTestExponentialHistogramDataPointBuckets()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go
new file mode 100644
index 000000000..f93525d34
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportPartialSuccess represents the details of a partially successful export request.
+type ExportLogsPartialSuccess struct {
+ RejectedLogRecords int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportLogsPartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportLogsPartialSuccess{}
+ },
+ }
+)
+
+func NewExportLogsPartialSuccess() *ExportLogsPartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportLogsPartialSuccess{}
+ }
+ return protoPoolExportLogsPartialSuccess.Get().(*ExportLogsPartialSuccess)
+}
+
+func DeleteExportLogsPartialSuccess(orig *ExportLogsPartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportLogsPartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportLogsPartialSuccess(dest, src *ExportLogsPartialSuccess) *ExportLogsPartialSuccess {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportLogsPartialSuccess()
+ }
+ dest.RejectedLogRecords = src.RejectedLogRecords
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportLogsPartialSuccessSlice(dest, src []ExportLogsPartialSuccess) []ExportLogsPartialSuccess {
+ var newDest []ExportLogsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportLogsPartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsPartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportLogsPartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportLogsPartialSuccessPtrSlice(dest, src []*ExportLogsPartialSuccess) []*ExportLogsPartialSuccess {
+ var newDest []*ExportLogsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportLogsPartialSuccess, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsPartialSuccess()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsPartialSuccess(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsPartialSuccess()
+ }
+ }
+ for i := range src {
+ CopyExportLogsPartialSuccess(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportLogsPartialSuccess) Reset() {
+ *orig = ExportLogsPartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportLogsPartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedLogRecords != int64(0) {
+ dest.WriteObjectField("rejectedLogRecords")
+ dest.WriteInt64(orig.RejectedLogRecords)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportLogsPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedLogRecords", "rejected_log_records":
+ orig.RejectedLogRecords = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportLogsPartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedLogRecords != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedLogRecords))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportLogsPartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedLogRecords != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedLogRecords))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportLogsPartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedLogRecords = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportLogsPartialSuccess() *ExportLogsPartialSuccess {
+ orig := NewExportLogsPartialSuccess()
+ orig.RejectedLogRecords = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportLogsPartialSuccessPtrSlice() []*ExportLogsPartialSuccess {
+ orig := make([]*ExportLogsPartialSuccess, 5)
+ orig[0] = NewExportLogsPartialSuccess()
+ orig[1] = GenTestExportLogsPartialSuccess()
+ orig[2] = NewExportLogsPartialSuccess()
+ orig[3] = GenTestExportLogsPartialSuccess()
+ orig[4] = NewExportLogsPartialSuccess()
+ return orig
+}
+
+func GenTestExportLogsPartialSuccessSlice() []ExportLogsPartialSuccess {
+ orig := make([]ExportLogsPartialSuccess, 5)
+ orig[1] = *GenTestExportLogsPartialSuccess()
+ orig[3] = *GenTestExportLogsPartialSuccess()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go
new file mode 100644
index 000000000..1c849ec22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Logs is the top-level struct that is propagated through the logs pipeline.
+// Use NewLogs to create new instance, zero-initialized instance is not valid for use.
+type ExportLogsServiceRequest struct {
+ ResourceLogs []*ResourceLogs
+}
+
+var (
+ protoPoolExportLogsServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportLogsServiceRequest{}
+ },
+ }
+)
+
+func NewExportLogsServiceRequest() *ExportLogsServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportLogsServiceRequest{}
+ }
+ return protoPoolExportLogsServiceRequest.Get().(*ExportLogsServiceRequest)
+}
+
+func DeleteExportLogsServiceRequest(orig *ExportLogsServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceLogs {
+ DeleteResourceLogs(orig.ResourceLogs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportLogsServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportLogsServiceRequest(dest, src *ExportLogsServiceRequest) *ExportLogsServiceRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportLogsServiceRequest()
+ }
+ dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs)
+
+ return dest
+}
+
+func CopyExportLogsServiceRequestSlice(dest, src []ExportLogsServiceRequest) []ExportLogsServiceRequest {
+ var newDest []ExportLogsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportLogsServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportLogsServiceRequestPtrSlice(dest, src []*ExportLogsServiceRequest) []*ExportLogsServiceRequest {
+ var newDest []*ExportLogsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportLogsServiceRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceRequest()
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportLogsServiceRequest) Reset() {
+ *orig = ExportLogsServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportLogsServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceLogs) > 0 {
+ dest.WriteObjectField("resourceLogs")
+ dest.WriteArrayStart()
+ orig.ResourceLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceLogs); i++ {
+ dest.WriteMore()
+ orig.ResourceLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportLogsServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceLogs", "resource_logs":
+ for iter.ReadArray() {
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportLogsServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceLogs {
+ l = orig.ResourceLogs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportLogsServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
+ l = orig.ResourceLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportLogsServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportLogsServiceRequest() *ExportLogsServiceRequest {
+ orig := NewExportLogsServiceRequest()
+ orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()}
+ return orig
+}
+
+func GenTestExportLogsServiceRequestPtrSlice() []*ExportLogsServiceRequest {
+ orig := make([]*ExportLogsServiceRequest, 5)
+ orig[0] = NewExportLogsServiceRequest()
+ orig[1] = GenTestExportLogsServiceRequest()
+ orig[2] = NewExportLogsServiceRequest()
+ orig[3] = GenTestExportLogsServiceRequest()
+ orig[4] = NewExportLogsServiceRequest()
+ return orig
+}
+
+func GenTestExportLogsServiceRequestSlice() []ExportLogsServiceRequest {
+ orig := make([]ExportLogsServiceRequest, 5)
+ orig[1] = *GenTestExportLogsServiceRequest()
+ orig[3] = *GenTestExportLogsServiceRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go
new file mode 100644
index 000000000..7b78c7360
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportResponse represents the response for gRPC/HTTP client/server.
+type ExportLogsServiceResponse struct {
+ PartialSuccess ExportLogsPartialSuccess
+}
+
+var (
+ protoPoolExportLogsServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportLogsServiceResponse{}
+ },
+ }
+)
+
+func NewExportLogsServiceResponse() *ExportLogsServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportLogsServiceResponse{}
+ }
+ return protoPoolExportLogsServiceResponse.Get().(*ExportLogsServiceResponse)
+}
+
+func DeleteExportLogsServiceResponse(orig *ExportLogsServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportLogsPartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportLogsServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportLogsServiceResponse(dest, src *ExportLogsServiceResponse) *ExportLogsServiceResponse {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportLogsServiceResponse()
+ }
+ CopyExportLogsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportLogsServiceResponseSlice(dest, src []ExportLogsServiceResponse) []ExportLogsServiceResponse {
+ var newDest []ExportLogsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportLogsServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportLogsServiceResponsePtrSlice(dest, src []*ExportLogsServiceResponse) []*ExportLogsServiceResponse {
+ var newDest []*ExportLogsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportLogsServiceResponse, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceResponse()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceResponse(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceResponse()
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceResponse(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportLogsServiceResponse) Reset() {
+ *orig = ExportLogsServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportLogsServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportLogsServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportLogsServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportLogsServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportLogsServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportLogsServiceResponse() *ExportLogsServiceResponse {
+ orig := NewExportLogsServiceResponse()
+ orig.PartialSuccess = *GenTestExportLogsPartialSuccess()
+ return orig
+}
+
+func GenTestExportLogsServiceResponsePtrSlice() []*ExportLogsServiceResponse {
+ orig := make([]*ExportLogsServiceResponse, 5)
+ orig[0] = NewExportLogsServiceResponse()
+ orig[1] = GenTestExportLogsServiceResponse()
+ orig[2] = NewExportLogsServiceResponse()
+ orig[3] = GenTestExportLogsServiceResponse()
+ orig[4] = NewExportLogsServiceResponse()
+ return orig
+}
+
+func GenTestExportLogsServiceResponseSlice() []ExportLogsServiceResponse {
+ orig := make([]ExportLogsServiceResponse, 5)
+ orig[1] = *GenTestExportLogsServiceResponse()
+ orig[3] = *GenTestExportLogsServiceResponse()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go
new file mode 100644
index 000000000..ff18f954c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportPartialSuccess represents the details of a partially successful export request.
+type ExportMetricsPartialSuccess struct {
+ RejectedDataPoints int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportMetricsPartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportMetricsPartialSuccess{}
+ },
+ }
+)
+
+func NewExportMetricsPartialSuccess() *ExportMetricsPartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportMetricsPartialSuccess{}
+ }
+ return protoPoolExportMetricsPartialSuccess.Get().(*ExportMetricsPartialSuccess)
+}
+
+func DeleteExportMetricsPartialSuccess(orig *ExportMetricsPartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportMetricsPartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportMetricsPartialSuccess(dest, src *ExportMetricsPartialSuccess) *ExportMetricsPartialSuccess {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportMetricsPartialSuccess()
+ }
+ dest.RejectedDataPoints = src.RejectedDataPoints
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportMetricsPartialSuccessSlice(dest, src []ExportMetricsPartialSuccess) []ExportMetricsPartialSuccess {
+ var newDest []ExportMetricsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportMetricsPartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsPartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportMetricsPartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportMetricsPartialSuccessPtrSlice(dest, src []*ExportMetricsPartialSuccess) []*ExportMetricsPartialSuccess {
+ var newDest []*ExportMetricsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportMetricsPartialSuccess, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsPartialSuccess()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsPartialSuccess(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsPartialSuccess()
+ }
+ }
+ for i := range src {
+ CopyExportMetricsPartialSuccess(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportMetricsPartialSuccess) Reset() {
+ *orig = ExportMetricsPartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportMetricsPartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedDataPoints != int64(0) {
+ dest.WriteObjectField("rejectedDataPoints")
+ dest.WriteInt64(orig.RejectedDataPoints)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportMetricsPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedDataPoints", "rejected_data_points":
+ orig.RejectedDataPoints = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportMetricsPartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedDataPoints != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedDataPoints))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportMetricsPartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedDataPoints != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedDataPoints))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportMetricsPartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedDataPoints = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportMetricsPartialSuccess() *ExportMetricsPartialSuccess {
+ orig := NewExportMetricsPartialSuccess()
+ orig.RejectedDataPoints = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportMetricsPartialSuccessPtrSlice() []*ExportMetricsPartialSuccess {
+ orig := make([]*ExportMetricsPartialSuccess, 5)
+ orig[0] = NewExportMetricsPartialSuccess()
+ orig[1] = GenTestExportMetricsPartialSuccess()
+ orig[2] = NewExportMetricsPartialSuccess()
+ orig[3] = GenTestExportMetricsPartialSuccess()
+ orig[4] = NewExportMetricsPartialSuccess()
+ return orig
+}
+
+func GenTestExportMetricsPartialSuccessSlice() []ExportMetricsPartialSuccess {
+ orig := make([]ExportMetricsPartialSuccess, 5)
+ orig[1] = *GenTestExportMetricsPartialSuccess()
+ orig[3] = *GenTestExportMetricsPartialSuccess()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go
new file mode 100644
index 000000000..f4a4a0fa7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Metrics is the top-level struct that is propagated through the metrics pipeline.
+// Use NewMetrics to create new instance, zero-initialized instance is not valid for use.
+type ExportMetricsServiceRequest struct {
+ ResourceMetrics []*ResourceMetrics
+}
+
+var (
+ protoPoolExportMetricsServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportMetricsServiceRequest{}
+ },
+ }
+)
+
+func NewExportMetricsServiceRequest() *ExportMetricsServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportMetricsServiceRequest{}
+ }
+ return protoPoolExportMetricsServiceRequest.Get().(*ExportMetricsServiceRequest)
+}
+
+func DeleteExportMetricsServiceRequest(orig *ExportMetricsServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceMetrics {
+ DeleteResourceMetrics(orig.ResourceMetrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportMetricsServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportMetricsServiceRequest(dest, src *ExportMetricsServiceRequest) *ExportMetricsServiceRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportMetricsServiceRequest()
+ }
+ dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics)
+
+ return dest
+}
+
+func CopyExportMetricsServiceRequestSlice(dest, src []ExportMetricsServiceRequest) []ExportMetricsServiceRequest {
+ var newDest []ExportMetricsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportMetricsServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportMetricsServiceRequestPtrSlice(dest, src []*ExportMetricsServiceRequest) []*ExportMetricsServiceRequest {
+ var newDest []*ExportMetricsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportMetricsServiceRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceRequest()
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportMetricsServiceRequest) Reset() {
+ *orig = ExportMetricsServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportMetricsServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceMetrics) > 0 {
+ dest.WriteObjectField("resourceMetrics")
+ dest.WriteArrayStart()
+ orig.ResourceMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceMetrics); i++ {
+ dest.WriteMore()
+ orig.ResourceMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportMetricsServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceMetrics", "resource_metrics":
+ for iter.ReadArray() {
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportMetricsServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceMetrics {
+ l = orig.ResourceMetrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportMetricsServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
+ l = orig.ResourceMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportMetricsServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportMetricsServiceRequest() *ExportMetricsServiceRequest {
+ orig := NewExportMetricsServiceRequest()
+ orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()}
+ return orig
+}
+
+func GenTestExportMetricsServiceRequestPtrSlice() []*ExportMetricsServiceRequest {
+ orig := make([]*ExportMetricsServiceRequest, 5)
+ orig[0] = NewExportMetricsServiceRequest()
+ orig[1] = GenTestExportMetricsServiceRequest()
+ orig[2] = NewExportMetricsServiceRequest()
+ orig[3] = GenTestExportMetricsServiceRequest()
+ orig[4] = NewExportMetricsServiceRequest()
+ return orig
+}
+
+func GenTestExportMetricsServiceRequestSlice() []ExportMetricsServiceRequest {
+ orig := make([]ExportMetricsServiceRequest, 5)
+ orig[1] = *GenTestExportMetricsServiceRequest()
+ orig[3] = *GenTestExportMetricsServiceRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go
new file mode 100644
index 000000000..19e69384e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportResponse represents the response for gRPC/HTTP client/server.
+type ExportMetricsServiceResponse struct {
+ PartialSuccess ExportMetricsPartialSuccess
+}
+
+var (
+ protoPoolExportMetricsServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportMetricsServiceResponse{}
+ },
+ }
+)
+
+func NewExportMetricsServiceResponse() *ExportMetricsServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportMetricsServiceResponse{}
+ }
+ return protoPoolExportMetricsServiceResponse.Get().(*ExportMetricsServiceResponse)
+}
+
+func DeleteExportMetricsServiceResponse(orig *ExportMetricsServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportMetricsPartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportMetricsServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportMetricsServiceResponse(dest, src *ExportMetricsServiceResponse) *ExportMetricsServiceResponse {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportMetricsServiceResponse()
+ }
+ CopyExportMetricsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportMetricsServiceResponseSlice(dest, src []ExportMetricsServiceResponse) []ExportMetricsServiceResponse {
+ var newDest []ExportMetricsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportMetricsServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportMetricsServiceResponsePtrSlice(dest, src []*ExportMetricsServiceResponse) []*ExportMetricsServiceResponse {
+ var newDest []*ExportMetricsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportMetricsServiceResponse, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceResponse()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceResponse(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceResponse()
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceResponse(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportMetricsServiceResponse) Reset() {
+ *orig = ExportMetricsServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportMetricsServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportMetricsServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportMetricsServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportMetricsServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportMetricsServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportMetricsServiceResponse() *ExportMetricsServiceResponse {
+ orig := NewExportMetricsServiceResponse()
+ orig.PartialSuccess = *GenTestExportMetricsPartialSuccess()
+ return orig
+}
+
+func GenTestExportMetricsServiceResponsePtrSlice() []*ExportMetricsServiceResponse {
+ orig := make([]*ExportMetricsServiceResponse, 5)
+ orig[0] = NewExportMetricsServiceResponse()
+ orig[1] = GenTestExportMetricsServiceResponse()
+ orig[2] = NewExportMetricsServiceResponse()
+ orig[3] = GenTestExportMetricsServiceResponse()
+ orig[4] = NewExportMetricsServiceResponse()
+ return orig
+}
+
+func GenTestExportMetricsServiceResponseSlice() []ExportMetricsServiceResponse {
+ orig := make([]ExportMetricsServiceResponse, 5)
+ orig[1] = *GenTestExportMetricsServiceResponse()
+ orig[3] = *GenTestExportMetricsServiceResponse()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go
new file mode 100644
index 000000000..03a20a624
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportPartialSuccess represents the details of a partially successful export request.
+type ExportProfilesPartialSuccess struct {
+ RejectedProfiles int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportProfilesPartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportProfilesPartialSuccess{}
+ },
+ }
+)
+
+func NewExportProfilesPartialSuccess() *ExportProfilesPartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportProfilesPartialSuccess{}
+ }
+ return protoPoolExportProfilesPartialSuccess.Get().(*ExportProfilesPartialSuccess)
+}
+
+func DeleteExportProfilesPartialSuccess(orig *ExportProfilesPartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportProfilesPartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportProfilesPartialSuccess(dest, src *ExportProfilesPartialSuccess) *ExportProfilesPartialSuccess {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportProfilesPartialSuccess()
+ }
+ dest.RejectedProfiles = src.RejectedProfiles
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportProfilesPartialSuccessSlice(dest, src []ExportProfilesPartialSuccess) []ExportProfilesPartialSuccess {
+ var newDest []ExportProfilesPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportProfilesPartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesPartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportProfilesPartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportProfilesPartialSuccessPtrSlice(dest, src []*ExportProfilesPartialSuccess) []*ExportProfilesPartialSuccess {
+ var newDest []*ExportProfilesPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportProfilesPartialSuccess, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesPartialSuccess()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesPartialSuccess(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesPartialSuccess()
+ }
+ }
+ for i := range src {
+ CopyExportProfilesPartialSuccess(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportProfilesPartialSuccess) Reset() {
+ *orig = ExportProfilesPartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportProfilesPartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedProfiles != int64(0) {
+ dest.WriteObjectField("rejectedProfiles")
+ dest.WriteInt64(orig.RejectedProfiles)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportProfilesPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedProfiles", "rejected_profiles":
+ orig.RejectedProfiles = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportProfilesPartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedProfiles != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedProfiles))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportProfilesPartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedProfiles != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedProfiles))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportProfilesPartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedProfiles = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportProfilesPartialSuccess() *ExportProfilesPartialSuccess {
+ orig := NewExportProfilesPartialSuccess()
+ orig.RejectedProfiles = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportProfilesPartialSuccessPtrSlice() []*ExportProfilesPartialSuccess {
+ orig := make([]*ExportProfilesPartialSuccess, 5)
+ orig[0] = NewExportProfilesPartialSuccess()
+ orig[1] = GenTestExportProfilesPartialSuccess()
+ orig[2] = NewExportProfilesPartialSuccess()
+ orig[3] = GenTestExportProfilesPartialSuccess()
+ orig[4] = NewExportProfilesPartialSuccess()
+ return orig
+}
+
+func GenTestExportProfilesPartialSuccessSlice() []ExportProfilesPartialSuccess {
+ orig := make([]ExportProfilesPartialSuccess, 5)
+ orig[1] = *GenTestExportProfilesPartialSuccess()
+ orig[3] = *GenTestExportProfilesPartialSuccess()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go
new file mode 100644
index 000000000..5906ce520
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go
@@ -0,0 +1,280 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Profiles is the top-level struct that is propagated through the profiles pipeline.
+// Use NewProfiles to create new instance, zero-initialized instance is not valid for use.
+type ExportProfilesServiceRequest struct {
+ ResourceProfiles []*ResourceProfiles
+ Dictionary ProfilesDictionary
+}
+
+var (
+ protoPoolExportProfilesServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportProfilesServiceRequest{}
+ },
+ }
+)
+
+func NewExportProfilesServiceRequest() *ExportProfilesServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportProfilesServiceRequest{}
+ }
+ return protoPoolExportProfilesServiceRequest.Get().(*ExportProfilesServiceRequest)
+}
+
+func DeleteExportProfilesServiceRequest(orig *ExportProfilesServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceProfiles {
+ DeleteResourceProfiles(orig.ResourceProfiles[i], true)
+ }
+ DeleteProfilesDictionary(&orig.Dictionary, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportProfilesServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportProfilesServiceRequest(dest, src *ExportProfilesServiceRequest) *ExportProfilesServiceRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportProfilesServiceRequest()
+ }
+ dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles)
+
+ CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary)
+
+ return dest
+}
+
+func CopyExportProfilesServiceRequestSlice(dest, src []ExportProfilesServiceRequest) []ExportProfilesServiceRequest {
+ var newDest []ExportProfilesServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportProfilesServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportProfilesServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportProfilesServiceRequestPtrSlice(dest, src []*ExportProfilesServiceRequest) []*ExportProfilesServiceRequest {
+	var newDest []*ExportProfilesServiceRequest
+	if cap(dest) < len(src) {
+		newDest = make([]*ExportProfilesServiceRequest, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportProfilesServiceRequest()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteExportProfilesServiceRequest(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportProfilesServiceRequest()
+		}
+	}
+	for i := range src {
+		CopyExportProfilesServiceRequest(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *ExportProfilesServiceRequest) Reset() {
+ *orig = ExportProfilesServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportProfilesServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceProfiles) > 0 {
+ dest.WriteObjectField("resourceProfiles")
+ dest.WriteArrayStart()
+ orig.ResourceProfiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceProfiles); i++ {
+ dest.WriteMore()
+ orig.ResourceProfiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectField("dictionary")
+ orig.Dictionary.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportProfilesServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceProfiles", "resource_profiles":
+ for iter.ReadArray() {
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "dictionary":
+
+ orig.Dictionary.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportProfilesServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceProfiles {
+ l = orig.ResourceProfiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.Dictionary.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportProfilesServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
+ l = orig.ResourceProfiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = orig.Dictionary.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *ExportProfilesServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Dictionary.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportProfilesServiceRequest() *ExportProfilesServiceRequest {
+ orig := NewExportProfilesServiceRequest()
+ orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()}
+ orig.Dictionary = *GenTestProfilesDictionary()
+ return orig
+}
+
+func GenTestExportProfilesServiceRequestPtrSlice() []*ExportProfilesServiceRequest {
+ orig := make([]*ExportProfilesServiceRequest, 5)
+ orig[0] = NewExportProfilesServiceRequest()
+ orig[1] = GenTestExportProfilesServiceRequest()
+ orig[2] = NewExportProfilesServiceRequest()
+ orig[3] = GenTestExportProfilesServiceRequest()
+ orig[4] = NewExportProfilesServiceRequest()
+ return orig
+}
+
+func GenTestExportProfilesServiceRequestSlice() []ExportProfilesServiceRequest {
+ orig := make([]ExportProfilesServiceRequest, 5)
+ orig[1] = *GenTestExportProfilesServiceRequest()
+ orig[3] = *GenTestExportProfilesServiceRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go
new file mode 100644
index 000000000..0153c0609
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportResponse represents the response for gRPC/HTTP client/server.
+type ExportProfilesServiceResponse struct {
+ PartialSuccess ExportProfilesPartialSuccess
+}
+
+var (
+ protoPoolExportProfilesServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportProfilesServiceResponse{}
+ },
+ }
+)
+
+func NewExportProfilesServiceResponse() *ExportProfilesServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportProfilesServiceResponse{}
+ }
+ return protoPoolExportProfilesServiceResponse.Get().(*ExportProfilesServiceResponse)
+}
+
+func DeleteExportProfilesServiceResponse(orig *ExportProfilesServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportProfilesPartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportProfilesServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportProfilesServiceResponse(dest, src *ExportProfilesServiceResponse) *ExportProfilesServiceResponse {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportProfilesServiceResponse()
+ }
+ CopyExportProfilesPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportProfilesServiceResponseSlice(dest, src []ExportProfilesServiceResponse) []ExportProfilesServiceResponse {
+ var newDest []ExportProfilesServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportProfilesServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportProfilesServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportProfilesServiceResponsePtrSlice(dest, src []*ExportProfilesServiceResponse) []*ExportProfilesServiceResponse {
+	var newDest []*ExportProfilesServiceResponse
+	if cap(dest) < len(src) {
+		newDest = make([]*ExportProfilesServiceResponse, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportProfilesServiceResponse()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteExportProfilesServiceResponse(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportProfilesServiceResponse()
+		}
+	}
+	for i := range src {
+		CopyExportProfilesServiceResponse(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *ExportProfilesServiceResponse) Reset() {
+ *orig = ExportProfilesServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportProfilesServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportProfilesServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportProfilesServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportProfilesServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportProfilesServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportProfilesServiceResponse() *ExportProfilesServiceResponse {
+ orig := NewExportProfilesServiceResponse()
+ orig.PartialSuccess = *GenTestExportProfilesPartialSuccess()
+ return orig
+}
+
+func GenTestExportProfilesServiceResponsePtrSlice() []*ExportProfilesServiceResponse {
+ orig := make([]*ExportProfilesServiceResponse, 5)
+ orig[0] = NewExportProfilesServiceResponse()
+ orig[1] = GenTestExportProfilesServiceResponse()
+ orig[2] = NewExportProfilesServiceResponse()
+ orig[3] = GenTestExportProfilesServiceResponse()
+ orig[4] = NewExportProfilesServiceResponse()
+ return orig
+}
+
+func GenTestExportProfilesServiceResponseSlice() []ExportProfilesServiceResponse {
+ orig := make([]ExportProfilesServiceResponse, 5)
+ orig[1] = *GenTestExportProfilesServiceResponse()
+ orig[3] = *GenTestExportProfilesServiceResponse()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go
new file mode 100644
index 000000000..df8025572
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportPartialSuccess represents the details of a partially successful export request.
+type ExportTracePartialSuccess struct {
+ RejectedSpans int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportTracePartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportTracePartialSuccess{}
+ },
+ }
+)
+
+func NewExportTracePartialSuccess() *ExportTracePartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportTracePartialSuccess{}
+ }
+ return protoPoolExportTracePartialSuccess.Get().(*ExportTracePartialSuccess)
+}
+
+func DeleteExportTracePartialSuccess(orig *ExportTracePartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportTracePartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportTracePartialSuccess(dest, src *ExportTracePartialSuccess) *ExportTracePartialSuccess {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportTracePartialSuccess()
+ }
+ dest.RejectedSpans = src.RejectedSpans
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportTracePartialSuccessSlice(dest, src []ExportTracePartialSuccess) []ExportTracePartialSuccess {
+ var newDest []ExportTracePartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportTracePartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTracePartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportTracePartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportTracePartialSuccessPtrSlice(dest, src []*ExportTracePartialSuccess) []*ExportTracePartialSuccess {
+	var newDest []*ExportTracePartialSuccess
+	if cap(dest) < len(src) {
+		newDest = make([]*ExportTracePartialSuccess, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportTracePartialSuccess()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteExportTracePartialSuccess(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportTracePartialSuccess()
+		}
+	}
+	for i := range src {
+		CopyExportTracePartialSuccess(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *ExportTracePartialSuccess) Reset() {
+ *orig = ExportTracePartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportTracePartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedSpans != int64(0) {
+ dest.WriteObjectField("rejectedSpans")
+ dest.WriteInt64(orig.RejectedSpans)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportTracePartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedSpans", "rejected_spans":
+ orig.RejectedSpans = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportTracePartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedSpans != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedSpans))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportTracePartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedSpans != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedSpans))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportTracePartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedSpans = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportTracePartialSuccess() *ExportTracePartialSuccess {
+ orig := NewExportTracePartialSuccess()
+ orig.RejectedSpans = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportTracePartialSuccessPtrSlice() []*ExportTracePartialSuccess {
+ orig := make([]*ExportTracePartialSuccess, 5)
+ orig[0] = NewExportTracePartialSuccess()
+ orig[1] = GenTestExportTracePartialSuccess()
+ orig[2] = NewExportTracePartialSuccess()
+ orig[3] = GenTestExportTracePartialSuccess()
+ orig[4] = NewExportTracePartialSuccess()
+ return orig
+}
+
+func GenTestExportTracePartialSuccessSlice() []ExportTracePartialSuccess {
+ orig := make([]ExportTracePartialSuccess, 5)
+ orig[1] = *GenTestExportTracePartialSuccess()
+ orig[3] = *GenTestExportTracePartialSuccess()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go
new file mode 100644
index 000000000..e64316f8f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Traces is the top-level struct that is propagated through the traces pipeline.
+// Use NewTraces to create new instance, zero-initialized instance is not valid for use.
+type ExportTraceServiceRequest struct {
+ ResourceSpans []*ResourceSpans
+}
+
+var (
+ protoPoolExportTraceServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportTraceServiceRequest{}
+ },
+ }
+)
+
+func NewExportTraceServiceRequest() *ExportTraceServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportTraceServiceRequest{}
+ }
+ return protoPoolExportTraceServiceRequest.Get().(*ExportTraceServiceRequest)
+}
+
+func DeleteExportTraceServiceRequest(orig *ExportTraceServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceSpans {
+ DeleteResourceSpans(orig.ResourceSpans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportTraceServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportTraceServiceRequest(dest, src *ExportTraceServiceRequest) *ExportTraceServiceRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportTraceServiceRequest()
+ }
+ dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans)
+
+ return dest
+}
+
+func CopyExportTraceServiceRequestSlice(dest, src []ExportTraceServiceRequest) []ExportTraceServiceRequest {
+ var newDest []ExportTraceServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportTraceServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTraceServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportTraceServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportTraceServiceRequestPtrSlice(dest, src []*ExportTraceServiceRequest) []*ExportTraceServiceRequest {
+	var newDest []*ExportTraceServiceRequest
+	if cap(dest) < len(src) {
+		newDest = make([]*ExportTraceServiceRequest, len(src))
+		// Copy old pointers to re-use.
+		copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportTraceServiceRequest()
+		}
+	} else {
+		newDest = dest[:len(src)]
+		// Cleanup the rest of the elements so GC can free the memory.
+		// This can happen when len(src) < len(dest) < cap(dest).
+		for i := len(src); i < len(dest); i++ {
+			DeleteExportTraceServiceRequest(dest[i], true)
+			dest[i] = nil
+		}
+		// Add new pointers for missing elements.
+		// This can happen when len(dest) < len(src) < cap(dest).
+		for i := len(dest); i < len(src); i++ {
+			newDest[i] = NewExportTraceServiceRequest()
+		}
+	}
+	for i := range src {
+		CopyExportTraceServiceRequest(newDest[i], src[i])
+	}
+	return newDest
+}
+
+func (orig *ExportTraceServiceRequest) Reset() {
+ *orig = ExportTraceServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportTraceServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceSpans) > 0 {
+ dest.WriteObjectField("resourceSpans")
+ dest.WriteArrayStart()
+ orig.ResourceSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceSpans); i++ {
+ dest.WriteMore()
+ orig.ResourceSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportTraceServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceSpans", "resource_spans":
+ for iter.ReadArray() {
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportTraceServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceSpans {
+ l = orig.ResourceSpans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportTraceServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
+ l = orig.ResourceSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportTraceServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportTraceServiceRequest() *ExportTraceServiceRequest {
+ orig := NewExportTraceServiceRequest()
+ orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()}
+ return orig
+}
+
+func GenTestExportTraceServiceRequestPtrSlice() []*ExportTraceServiceRequest {
+ orig := make([]*ExportTraceServiceRequest, 5)
+ orig[0] = NewExportTraceServiceRequest()
+ orig[1] = GenTestExportTraceServiceRequest()
+ orig[2] = NewExportTraceServiceRequest()
+ orig[3] = GenTestExportTraceServiceRequest()
+ orig[4] = NewExportTraceServiceRequest()
+ return orig
+}
+
+func GenTestExportTraceServiceRequestSlice() []ExportTraceServiceRequest {
+ orig := make([]ExportTraceServiceRequest, 5)
+ orig[1] = *GenTestExportTraceServiceRequest()
+ orig[3] = *GenTestExportTraceServiceRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go
new file mode 100644
index 000000000..86be1a0fd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportTraceServiceResponse represents the response for gRPC/HTTP client/server.
+type ExportTraceServiceResponse struct {
+ PartialSuccess ExportTracePartialSuccess
+}
+
+var (
+ protoPoolExportTraceServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportTraceServiceResponse{}
+ },
+ }
+)
+
+func NewExportTraceServiceResponse() *ExportTraceServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportTraceServiceResponse{}
+ }
+ return protoPoolExportTraceServiceResponse.Get().(*ExportTraceServiceResponse)
+}
+
+func DeleteExportTraceServiceResponse(orig *ExportTraceServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportTracePartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportTraceServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportTraceServiceResponse(dest, src *ExportTraceServiceResponse) *ExportTraceServiceResponse {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportTraceServiceResponse()
+ }
+ CopyExportTracePartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportTraceServiceResponseSlice(dest, src []ExportTraceServiceResponse) []ExportTraceServiceResponse {
+ var newDest []ExportTraceServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportTraceServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTraceServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportTraceServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportTraceServiceResponsePtrSlice(dest, src []*ExportTraceServiceResponse) []*ExportTraceServiceResponse {
+ var newDest []*ExportTraceServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportTraceServiceResponse, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTraceServiceResponse()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTraceServiceResponse(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTraceServiceResponse()
+ }
+ }
+ for i := range src {
+ CopyExportTraceServiceResponse(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportTraceServiceResponse) Reset() {
+ *orig = ExportTraceServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportTraceServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportTraceServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportTraceServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportTraceServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportTraceServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportTraceServiceResponse() *ExportTraceServiceResponse {
+ orig := NewExportTraceServiceResponse()
+ orig.PartialSuccess = *GenTestExportTracePartialSuccess()
+ return orig
+}
+
+func GenTestExportTraceServiceResponsePtrSlice() []*ExportTraceServiceResponse {
+ orig := make([]*ExportTraceServiceResponse, 5)
+ orig[0] = NewExportTraceServiceResponse()
+ orig[1] = GenTestExportTraceServiceResponse()
+ orig[2] = NewExportTraceServiceResponse()
+ orig[3] = GenTestExportTraceServiceResponse()
+ orig[4] = NewExportTraceServiceResponse()
+ return orig
+}
+
+func GenTestExportTraceServiceResponseSlice() []ExportTraceServiceResponse {
+ orig := make([]ExportTraceServiceResponse, 5)
+ orig[1] = *GenTestExportTraceServiceResponse()
+ orig[3] = *GenTestExportTraceServiceResponse()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_function.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go
similarity index 60%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_function.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go
index 2999e35c2..4e8225390 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_function.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go
@@ -10,27 +10,34 @@ import (
"fmt"
"sync"
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source.
+type Function struct {
+ NameStrindex int32
+ SystemNameStrindex int32
+ FilenameStrindex int32
+ StartLine int64
+}
+
var (
protoPoolFunction = sync.Pool{
New: func() any {
- return &otlpprofiles.Function{}
+ return &Function{}
},
}
)
-func NewOrigFunction() *otlpprofiles.Function {
+func NewFunction() *Function {
if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.Function{}
+ return &Function{}
}
- return protoPoolFunction.Get().(*otlpprofiles.Function)
+ return protoPoolFunction.Get().(*Function)
}
-func DeleteOrigFunction(orig *otlpprofiles.Function, nullable bool) {
+func DeleteFunction(orig *Function, nullable bool) {
if orig == nil {
return
}
@@ -46,28 +53,84 @@ func DeleteOrigFunction(orig *otlpprofiles.Function, nullable bool) {
}
}
-func CopyOrigFunction(dest, src *otlpprofiles.Function) {
+func CopyFunction(dest, src *Function) *Function {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewFunction()
}
dest.NameStrindex = src.NameStrindex
+
dest.SystemNameStrindex = src.SystemNameStrindex
+
dest.FilenameStrindex = src.FilenameStrindex
+
dest.StartLine = src.StartLine
+
+ return dest
}
-func GenTestOrigFunction() *otlpprofiles.Function {
- orig := NewOrigFunction()
- orig.NameStrindex = int32(13)
- orig.SystemNameStrindex = int32(13)
- orig.FilenameStrindex = int32(13)
- orig.StartLine = int64(13)
- return orig
+func CopyFunctionSlice(dest, src []Function) []Function {
+ var newDest []Function
+ if cap(dest) < len(src) {
+ newDest = make([]Function, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteFunction(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyFunction(&newDest[i], &src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigFunction(orig *otlpprofiles.Function, dest *json.Stream) {
+func CopyFunctionPtrSlice(dest, src []*Function) []*Function {
+ var newDest []*Function
+ if cap(dest) < len(src) {
+ newDest = make([]*Function, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewFunction()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteFunction(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewFunction()
+ }
+ }
+ for i := range src {
+ CopyFunction(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Function) Reset() {
+ *orig = Function{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Function) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.NameStrindex != int32(0) {
dest.WriteObjectField("nameStrindex")
@@ -88,8 +151,8 @@ func MarshalJSONOrigFunction(orig *otlpprofiles.Function, dest *json.Stream) {
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigFunction unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigFunction(orig *otlpprofiles.Function, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Function) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "nameStrindex", "name_strindex":
@@ -106,7 +169,7 @@ func UnmarshalJSONOrigFunction(orig *otlpprofiles.Function, iter *json.Iterator)
}
}
-func SizeProtoOrigFunction(orig *otlpprofiles.Function) int {
+func (orig *Function) SizeProto() int {
var n int
var l int
_ = l
@@ -125,7 +188,7 @@ func SizeProtoOrigFunction(orig *otlpprofiles.Function) int {
return n
}
-func MarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) int {
+func (orig *Function) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
@@ -152,7 +215,7 @@ func MarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) int {
return len(buf) - pos
}
-func UnmarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) error {
+func (orig *Function) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -223,3 +286,29 @@ func UnmarshalProtoOrigFunction(orig *otlpprofiles.Function, buf []byte) error {
}
return nil
}
+
+func GenTestFunction() *Function {
+ orig := NewFunction()
+ orig.NameStrindex = int32(13)
+ orig.SystemNameStrindex = int32(13)
+ orig.FilenameStrindex = int32(13)
+ orig.StartLine = int64(13)
+ return orig
+}
+
+func GenTestFunctionPtrSlice() []*Function {
+ orig := make([]*Function, 5)
+ orig[0] = NewFunction()
+ orig[1] = GenTestFunction()
+ orig[2] = NewFunction()
+ orig[3] = GenTestFunction()
+ orig[4] = NewFunction()
+ return orig
+}
+
+func GenTestFunctionSlice() []Function {
+ orig := make([]Function, 5)
+ orig[1] = *GenTestFunction()
+ orig[3] = *GenTestFunction()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go
new file mode 100644
index 000000000..e39eb2e79
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Gauge represents the type of a numeric metric that always exports the "current value" for every data point.
+type Gauge struct {
+ DataPoints []*NumberDataPoint
+}
+
+var (
+ protoPoolGauge = sync.Pool{
+ New: func() any {
+ return &Gauge{}
+ },
+ }
+)
+
+func NewGauge() *Gauge {
+ if !UseProtoPooling.IsEnabled() {
+ return &Gauge{}
+ }
+ return protoPoolGauge.Get().(*Gauge)
+}
+
+func DeleteGauge(orig *Gauge, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteNumberDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolGauge.Put(orig)
+ }
+}
+
+func CopyGauge(dest, src *Gauge) *Gauge {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewGauge()
+ }
+ dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ return dest
+}
+
+func CopyGaugeSlice(dest, src []Gauge) []Gauge {
+ var newDest []Gauge
+ if cap(dest) < len(src) {
+ newDest = make([]Gauge, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteGauge(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyGauge(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyGaugePtrSlice(dest, src []*Gauge) []*Gauge {
+ var newDest []*Gauge
+ if cap(dest) < len(src) {
+ newDest = make([]*Gauge, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewGauge()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteGauge(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewGauge()
+ }
+ }
+ for i := range src {
+ CopyGauge(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Gauge) Reset() {
+ *orig = Gauge{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Gauge) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Gauge) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Gauge) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Gauge) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *Gauge) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestGauge() *Gauge {
+ orig := NewGauge()
+ orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()}
+ return orig
+}
+
+func GenTestGaugePtrSlice() []*Gauge {
+ orig := make([]*Gauge, 5)
+ orig[0] = NewGauge()
+ orig[1] = GenTestGauge()
+ orig[2] = NewGauge()
+ orig[3] = GenTestGauge()
+ orig[4] = NewGauge()
+ return orig
+}
+
+func GenTestGaugeSlice() []Gauge {
+ orig := make([]Gauge, 5)
+ orig[1] = *GenTestGauge()
+ orig[3] = *GenTestGauge()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go
new file mode 100644
index 000000000..b35b328f9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go
@@ -0,0 +1,276 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval.
+type Histogram struct {
+ DataPoints []*HistogramDataPoint
+ AggregationTemporality AggregationTemporality
+}
+
+var (
+ protoPoolHistogram = sync.Pool{
+ New: func() any {
+ return &Histogram{}
+ },
+ }
+)
+
+func NewHistogram() *Histogram {
+ if !UseProtoPooling.IsEnabled() {
+ return &Histogram{}
+ }
+ return protoPoolHistogram.Get().(*Histogram)
+}
+
+func DeleteHistogram(orig *Histogram, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteHistogramDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolHistogram.Put(orig)
+ }
+}
+
+func CopyHistogram(dest, src *Histogram) *Histogram {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewHistogram()
+ }
+ dest.DataPoints = CopyHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ dest.AggregationTemporality = src.AggregationTemporality
+
+ return dest
+}
+
+func CopyHistogramSlice(dest, src []Histogram) []Histogram {
+ var newDest []Histogram
+ if cap(dest) < len(src) {
+ newDest = make([]Histogram, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogram(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyHistogram(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyHistogramPtrSlice(dest, src []*Histogram) []*Histogram {
+ var newDest []*Histogram
+ if cap(dest) < len(src) {
+ newDest = make([]*Histogram, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogram()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogram(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogram()
+ }
+ }
+ for i := range src {
+ CopyHistogram(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Histogram) Reset() {
+ *orig = Histogram{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Histogram) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+
+ if int32(orig.AggregationTemporality) != 0 {
+ dest.WriteObjectField("aggregationTemporality")
+ dest.WriteInt32(int32(orig.AggregationTemporality))
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Histogram) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ case "aggregationTemporality", "aggregation_temporality":
+ orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Histogram) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.AggregationTemporality != 0 {
+ n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
+ }
+ return n
+}
+
+func (orig *Histogram) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.AggregationTemporality != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
+ pos--
+ buf[pos] = 0x10
+ }
+ return len(buf) - pos
+}
+
+func (orig *Histogram) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.AggregationTemporality = AggregationTemporality(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestHistogram() *Histogram {
+ orig := NewHistogram()
+ orig.DataPoints = []*HistogramDataPoint{{}, GenTestHistogramDataPoint()}
+ orig.AggregationTemporality = AggregationTemporality(13)
+ return orig
+}
+
+func GenTestHistogramPtrSlice() []*Histogram {
+ orig := make([]*Histogram, 5)
+ orig[0] = NewHistogram()
+ orig[1] = GenTestHistogram()
+ orig[2] = NewHistogram()
+ orig[3] = GenTestHistogram()
+ orig[4] = NewHistogram()
+ return orig
+}
+
+func GenTestHistogramSlice() []Histogram {
+ orig := make([]Histogram, 5)
+ orig[1] = *GenTestHistogram()
+ orig[3] = *GenTestHistogram()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go
similarity index 50%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapoint.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go
index 7d9240901..42953b3f9 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go
@@ -12,45 +12,112 @@ import (
"math"
"sync"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+func (m *HistogramDataPoint) GetSum_() any {
+ if m != nil {
+ return m.Sum_
+ }
+ return nil
+}
+
+type HistogramDataPoint_Sum struct {
+ Sum float64
+}
+
+func (m *HistogramDataPoint) GetSum() float64 {
+ if v, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok {
+ return v.Sum
+ }
+ return float64(0)
+}
+
+func (m *HistogramDataPoint) GetMin_() any {
+ if m != nil {
+ return m.Min_
+ }
+ return nil
+}
+
+type HistogramDataPoint_Min struct {
+ Min float64
+}
+
+func (m *HistogramDataPoint) GetMin() float64 {
+ if v, ok := m.GetMin_().(*HistogramDataPoint_Min); ok {
+ return v.Min
+ }
+ return float64(0)
+}
+
+func (m *HistogramDataPoint) GetMax_() any {
+ if m != nil {
+ return m.Max_
+ }
+ return nil
+}
+
+type HistogramDataPoint_Max struct {
+ Max float64
+}
+
+func (m *HistogramDataPoint) GetMax() float64 {
+ if v, ok := m.GetMax_().(*HistogramDataPoint_Max); ok {
+ return v.Max
+ }
+ return float64(0)
+}
+
+// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values.
+type HistogramDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Count uint64
+ Sum_ any
+ BucketCounts []uint64
+ ExplicitBounds []float64
+ Exemplars []Exemplar
+ Flags uint32
+ Min_ any
+ Max_ any
+}
+
var (
protoPoolHistogramDataPoint = sync.Pool{
New: func() any {
- return &otlpmetrics.HistogramDataPoint{}
+ return &HistogramDataPoint{}
},
}
ProtoPoolHistogramDataPoint_Sum = sync.Pool{
New: func() any {
- return &otlpmetrics.HistogramDataPoint_Sum{}
+ return &HistogramDataPoint_Sum{}
},
}
ProtoPoolHistogramDataPoint_Min = sync.Pool{
New: func() any {
- return &otlpmetrics.HistogramDataPoint_Min{}
+ return &HistogramDataPoint_Min{}
},
}
ProtoPoolHistogramDataPoint_Max = sync.Pool{
New: func() any {
- return &otlpmetrics.HistogramDataPoint_Max{}
+ return &HistogramDataPoint_Max{}
},
}
)
-func NewOrigHistogramDataPoint() *otlpmetrics.HistogramDataPoint {
+func NewHistogramDataPoint() *HistogramDataPoint {
if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.HistogramDataPoint{}
+ return &HistogramDataPoint{}
}
- return protoPoolHistogramDataPoint.Get().(*otlpmetrics.HistogramDataPoint)
+ return protoPoolHistogramDataPoint.Get().(*HistogramDataPoint)
}
-func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable bool) {
+func DeleteHistogramDataPoint(orig *HistogramDataPoint, nullable bool) {
if orig == nil {
return
}
@@ -61,10 +128,10 @@ func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable
}
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Sum_.(type) {
- case *otlpmetrics.HistogramDataPoint_Sum:
+ case *HistogramDataPoint_Sum:
if UseProtoPooling.IsEnabled() {
ov.Sum = float64(0)
ProtoPoolHistogramDataPoint_Sum.Put(ov)
@@ -72,10 +139,10 @@ func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable
}
for i := range orig.Exemplars {
- DeleteOrigExemplar(&orig.Exemplars[i], false)
+ DeleteExemplar(&orig.Exemplars[i], false)
}
switch ov := orig.Min_.(type) {
- case *otlpmetrics.HistogramDataPoint_Min:
+ case *HistogramDataPoint_Min:
if UseProtoPooling.IsEnabled() {
ov.Min = float64(0)
ProtoPoolHistogramDataPoint_Min.Put(ov)
@@ -83,7 +150,7 @@ func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable
}
switch ov := orig.Max_.(type) {
- case *otlpmetrics.HistogramDataPoint_Max:
+ case *HistogramDataPoint_Max:
if UseProtoPooling.IsEnabled() {
ov.Max = float64(0)
ProtoPoolHistogramDataPoint_Max.Put(ov)
@@ -97,77 +164,140 @@ func DeleteOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable
}
}
-func CopyOrigHistogramDataPoint(dest, src *otlpmetrics.HistogramDataPoint) {
+func CopyHistogramDataPoint(dest, src *HistogramDataPoint) *HistogramDataPoint {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewHistogramDataPoint()
}
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.StartTimeUnixNano = src.StartTimeUnixNano
+
dest.TimeUnixNano = src.TimeUnixNano
+
dest.Count = src.Count
- if srcSum, ok := src.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
- destSum, ok := dest.Sum_.(*otlpmetrics.HistogramDataPoint_Sum)
- if !ok {
- destSum = &otlpmetrics.HistogramDataPoint_Sum{}
- dest.Sum_ = destSum
+
+ switch t := src.Sum_.(type) {
+ case *HistogramDataPoint_Sum:
+ var ov *HistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
}
- destSum.Sum = srcSum.Sum
- } else {
+ ov.Sum = t.Sum
+ dest.Sum_ = ov
+ default:
dest.Sum_ = nil
}
- dest.BucketCounts = CopyOrigUint64Slice(dest.BucketCounts, src.BucketCounts)
- dest.ExplicitBounds = CopyOrigFloat64Slice(dest.ExplicitBounds, src.ExplicitBounds)
- dest.Exemplars = CopyOrigExemplarSlice(dest.Exemplars, src.Exemplars)
+
+ dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...)
+ dest.ExplicitBounds = append(dest.ExplicitBounds[:0], src.ExplicitBounds...)
+ dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
+
dest.Flags = src.Flags
- if srcMin, ok := src.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
- destMin, ok := dest.Min_.(*otlpmetrics.HistogramDataPoint_Min)
- if !ok {
- destMin = &otlpmetrics.HistogramDataPoint_Min{}
- dest.Min_ = destMin
+
+ switch t := src.Min_.(type) {
+ case *HistogramDataPoint_Min:
+ var ov *HistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
}
- destMin.Min = srcMin.Min
- } else {
+ ov.Min = t.Min
+ dest.Min_ = ov
+ default:
dest.Min_ = nil
}
- if srcMax, ok := src.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
- destMax, ok := dest.Max_.(*otlpmetrics.HistogramDataPoint_Max)
- if !ok {
- destMax = &otlpmetrics.HistogramDataPoint_Max{}
- dest.Max_ = destMax
+
+ switch t := src.Max_.(type) {
+ case *HistogramDataPoint_Max:
+ var ov *HistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
}
- destMax.Max = srcMax.Max
- } else {
+ ov.Max = t.Max
+ dest.Max_ = ov
+ default:
dest.Max_ = nil
}
+
+ return dest
}
-func GenTestOrigHistogramDataPoint() *otlpmetrics.HistogramDataPoint {
- orig := NewOrigHistogramDataPoint()
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.StartTimeUnixNano = 1234567890
- orig.TimeUnixNano = 1234567890
- orig.Count = uint64(13)
- orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: float64(3.1415926)}
- orig.BucketCounts = GenerateOrigTestUint64Slice()
- orig.ExplicitBounds = GenerateOrigTestFloat64Slice()
- orig.Exemplars = GenerateOrigTestExemplarSlice()
- orig.Flags = 1
- orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: float64(3.1415926)}
- orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: float64(3.1415926)}
- return orig
+func CopyHistogramDataPointSlice(dest, src []HistogramDataPoint) []HistogramDataPoint {
+ var newDest []HistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]HistogramDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogramDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyHistogramDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyHistogramDataPointPtrSlice(dest, src []*HistogramDataPoint) []*HistogramDataPoint {
+ var newDest []*HistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*HistogramDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogramDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogramDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogramDataPoint()
+ }
+ }
+ for i := range src {
+ CopyHistogramDataPoint(newDest[i], src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, dest *json.Stream) {
+func (orig *HistogramDataPoint) Reset() {
+ *orig = HistogramDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *HistogramDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -183,7 +313,7 @@ func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, des
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
- if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
+ if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
@@ -210,10 +340,10 @@ func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, des
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
- MarshalJSONOrigExemplar(&orig.Exemplars[0], dest)
+ orig.Exemplars[0].MarshalJSON(dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
- MarshalJSONOrigExemplar(&orig.Exemplars[i], dest)
+ orig.Exemplars[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -221,25 +351,25 @@ func MarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, des
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
- if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
+ if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
dest.WriteObjectField("min")
dest.WriteFloat64(orig.Min)
}
- if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
+ if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
dest.WriteObjectField("max")
dest.WriteFloat64(orig.Max)
}
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigHistogramDataPoint unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *HistogramDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
@@ -250,11 +380,11 @@ func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, i
orig.Count = iter.ReadUint64()
case "sum":
{
- var ov *otlpmetrics.HistogramDataPoint_Sum
+ var ov *HistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.HistogramDataPoint_Sum{}
+ ov = &HistogramDataPoint_Sum{}
} else {
- ov = ProtoPoolHistogramDataPoint_Sum.Get().(*otlpmetrics.HistogramDataPoint_Sum)
+ ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
}
ov.Sum = iter.ReadFloat64()
orig.Sum_ = ov
@@ -272,19 +402,19 @@ func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, i
case "exemplars":
for iter.ReadArray() {
- orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
- UnmarshalJSONOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter)
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
}
case "flags":
orig.Flags = iter.ReadUint32()
case "min":
{
- var ov *otlpmetrics.HistogramDataPoint_Min
+ var ov *HistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.HistogramDataPoint_Min{}
+ ov = &HistogramDataPoint_Min{}
} else {
- ov = ProtoPoolHistogramDataPoint_Min.Get().(*otlpmetrics.HistogramDataPoint_Min)
+ ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
}
ov.Min = iter.ReadFloat64()
orig.Min_ = ov
@@ -292,11 +422,11 @@ func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, i
case "max":
{
- var ov *otlpmetrics.HistogramDataPoint_Max
+ var ov *HistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.HistogramDataPoint_Max{}
+ ov = &HistogramDataPoint_Max{}
} else {
- ov = ProtoPoolHistogramDataPoint_Max.Get().(*otlpmetrics.HistogramDataPoint_Max)
+ ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
}
ov.Max = iter.ReadFloat64()
orig.Max_ = ov
@@ -308,12 +438,12 @@ func UnmarshalJSONOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, i
}
}
-func SizeProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint) int {
+func (orig *HistogramDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
@@ -325,7 +455,7 @@ func SizeProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint) int {
if orig.Count != 0 {
n += 9
}
- if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
+ if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
_ = orig
n += 9
}
@@ -340,29 +470,29 @@ func SizeProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint) int {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Exemplars {
- l = SizeProtoOrigExemplar(&orig.Exemplars[i])
+ l = orig.Exemplars[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
- if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
+ if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
_ = orig
n += 9
}
- if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
+ if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
_ = orig
n += 9
}
return n
}
-func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, buf []byte) int {
+func (orig *HistogramDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -386,7 +516,7 @@ func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, bu
pos--
buf[pos] = 0x21
}
- if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
+ if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
@@ -413,7 +543,7 @@ func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, bu
buf[pos] = 0x3a
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
- l = MarshalProtoOrigExemplar(&orig.Exemplars[i], buf[:pos])
+ l = orig.Exemplars[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -424,13 +554,13 @@ func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, bu
pos--
buf[pos] = 0x50
}
- if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
+ if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
pos--
buf[pos] = 0x59
}
- if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
+ if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
pos--
@@ -439,7 +569,7 @@ func MarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, bu
return len(buf) - pos
}
-func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, buf []byte) error {
+func (orig *HistogramDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -464,8 +594,8 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint,
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -515,59 +645,77 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint,
if err != nil {
return err
}
- var ov *otlpmetrics.HistogramDataPoint_Sum
+ var ov *HistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.HistogramDataPoint_Sum{}
+ ov = &HistogramDataPoint_Sum{}
} else {
- ov = ProtoPoolHistogramDataPoint_Sum.Get().(*otlpmetrics.HistogramDataPoint_Sum)
+ ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
}
ov.Sum = math.Float64frombits(num)
orig.Sum_ = ov
case 6:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- size := length / 8
- orig.BucketCounts = make([]uint64, size)
- var num uint64
- for i := 0; i < size; i++ {
- num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
- orig.BucketCounts[i] = uint64(num)
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
+ startPos := pos - length
+ size := length / 8
+ orig.BucketCounts = make([]uint64, size)
+ var num uint64
+ for i := 0; i < size; i++ {
+ num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts[i] = uint64(num)
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
+ }
+ case proto.WireTypeI64:
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts = append(orig.BucketCounts, uint64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
}
case 7:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- size := length / 8
- orig.ExplicitBounds = make([]float64, size)
- var num uint64
- for i := 0; i < size; i++ {
- num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
- orig.ExplicitBounds[i] = math.Float64frombits(num)
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field ExplicitBounds", pos-startPos)
+ startPos := pos - length
+ size := length / 8
+ orig.ExplicitBounds = make([]float64, size)
+ var num uint64
+ for i := 0; i < size; i++ {
+ num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.ExplicitBounds[i] = math.Float64frombits(num)
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field ExplicitBounds", pos-startPos)
+ }
+ case proto.WireTypeI64:
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.ExplicitBounds = append(orig.ExplicitBounds, math.Float64frombits(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
}
case 8:
@@ -580,8 +728,8 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint,
return err
}
startPos := pos - length
- orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
- err = UnmarshalProtoOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos])
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -607,11 +755,11 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint,
if err != nil {
return err
}
- var ov *otlpmetrics.HistogramDataPoint_Min
+ var ov *HistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.HistogramDataPoint_Min{}
+ ov = &HistogramDataPoint_Min{}
} else {
- ov = ProtoPoolHistogramDataPoint_Min.Get().(*otlpmetrics.HistogramDataPoint_Min)
+ ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
}
ov.Min = math.Float64frombits(num)
orig.Min_ = ov
@@ -625,11 +773,11 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint,
if err != nil {
return err
}
- var ov *otlpmetrics.HistogramDataPoint_Max
+ var ov *HistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.HistogramDataPoint_Max{}
+ ov = &HistogramDataPoint_Max{}
} else {
- ov = ProtoPoolHistogramDataPoint_Max.Get().(*otlpmetrics.HistogramDataPoint_Max)
+ ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
}
ov.Max = math.Float64frombits(num)
orig.Max_ = ov
@@ -642,3 +790,36 @@ func UnmarshalProtoOrigHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint,
}
return nil
}
+
+func GenTestHistogramDataPoint() *HistogramDataPoint {
+ orig := NewHistogramDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Count = uint64(13)
+ orig.Sum_ = &HistogramDataPoint_Sum{Sum: float64(3.1415926)}
+ orig.BucketCounts = []uint64{uint64(0), uint64(13)}
+ orig.ExplicitBounds = []float64{float64(0), float64(3.1415926)}
+ orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
+ orig.Flags = uint32(13)
+ orig.Min_ = &HistogramDataPoint_Min{Min: float64(3.1415926)}
+ orig.Max_ = &HistogramDataPoint_Max{Max: float64(3.1415926)}
+ return orig
+}
+
+func GenTestHistogramDataPointPtrSlice() []*HistogramDataPoint {
+ orig := make([]*HistogramDataPoint, 5)
+ orig[0] = NewHistogramDataPoint()
+ orig[1] = GenTestHistogramDataPoint()
+ orig[2] = NewHistogramDataPoint()
+ orig[3] = GenTestHistogramDataPoint()
+ orig[4] = NewHistogramDataPoint()
+ return orig
+}
+
+func GenTestHistogramDataPointSlice() []HistogramDataPoint {
+ orig := make([]HistogramDataPoint, 5)
+ orig[1] = *GenTestHistogramDataPoint()
+ orig[3] = *GenTestHistogramDataPoint()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go
new file mode 100644
index 000000000..1e64085e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go
@@ -0,0 +1,343 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// InstrumentationScope is a message representing the instrumentation scope information.
+type InstrumentationScope struct {
+ Name string
+ Version string
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+}
+
+var (
+ protoPoolInstrumentationScope = sync.Pool{
+ New: func() any {
+ return &InstrumentationScope{}
+ },
+ }
+)
+
+func NewInstrumentationScope() *InstrumentationScope {
+ if !UseProtoPooling.IsEnabled() {
+ return &InstrumentationScope{}
+ }
+ return protoPoolInstrumentationScope.Get().(*InstrumentationScope)
+}
+
+func DeleteInstrumentationScope(orig *InstrumentationScope, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolInstrumentationScope.Put(orig)
+ }
+}
+
+func CopyInstrumentationScope(dest, src *InstrumentationScope) *InstrumentationScope {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewInstrumentationScope()
+ }
+ dest.Name = src.Name
+
+ dest.Version = src.Version
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ return dest
+}
+
+func CopyInstrumentationScopeSlice(dest, src []InstrumentationScope) []InstrumentationScope {
+ var newDest []InstrumentationScope
+ if cap(dest) < len(src) {
+ newDest = make([]InstrumentationScope, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteInstrumentationScope(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyInstrumentationScope(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyInstrumentationScopePtrSlice(dest, src []*InstrumentationScope) []*InstrumentationScope {
+ var newDest []*InstrumentationScope
+ if cap(dest) < len(src) {
+ newDest = make([]*InstrumentationScope, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewInstrumentationScope()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteInstrumentationScope(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewInstrumentationScope()
+ }
+ }
+ for i := range src {
+ CopyInstrumentationScope(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *InstrumentationScope) Reset() {
+ *orig = InstrumentationScope{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *InstrumentationScope) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+ if orig.Version != "" {
+ dest.WriteObjectField("version")
+ dest.WriteString(orig.Version)
+ }
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *InstrumentationScope) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "name":
+ orig.Name = iter.ReadString()
+ case "version":
+ orig.Version = iter.ReadString()
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *InstrumentationScope) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Version)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ return n
+}
+
+func (orig *InstrumentationScope) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Version)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Version)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x20
+ }
+ return len(buf) - pos
+}
+
+func (orig *InstrumentationScope) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Version = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestInstrumentationScope() *InstrumentationScope {
+ orig := NewInstrumentationScope()
+ orig.Name = "test_name"
+ orig.Version = "test_version"
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ return orig
+}
+
+func GenTestInstrumentationScopePtrSlice() []*InstrumentationScope {
+ orig := make([]*InstrumentationScope, 5)
+ orig[0] = NewInstrumentationScope()
+ orig[1] = GenTestInstrumentationScope()
+ orig[2] = NewInstrumentationScope()
+ orig[3] = GenTestInstrumentationScope()
+ orig[4] = NewInstrumentationScope()
+ return orig
+}
+
+func GenTestInstrumentationScopeSlice() []InstrumentationScope {
+ orig := make([]InstrumentationScope, 5)
+ orig[1] = *GenTestInstrumentationScope()
+ orig[3] = *GenTestInstrumentationScope()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go
new file mode 100644
index 000000000..2cd6029a6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go
@@ -0,0 +1,265 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type IPAddr struct {
+ IP []byte
+ Zone string
+}
+
+var (
+ protoPoolIPAddr = sync.Pool{
+ New: func() any {
+ return &IPAddr{}
+ },
+ }
+)
+
+func NewIPAddr() *IPAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &IPAddr{}
+ }
+ return protoPoolIPAddr.Get().(*IPAddr)
+}
+
+func DeleteIPAddr(orig *IPAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolIPAddr.Put(orig)
+ }
+}
+
+func CopyIPAddr(dest, src *IPAddr) *IPAddr {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewIPAddr()
+ }
+ dest.IP = src.IP
+
+ dest.Zone = src.Zone
+
+ return dest
+}
+
+func CopyIPAddrSlice(dest, src []IPAddr) []IPAddr {
+ var newDest []IPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]IPAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteIPAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyIPAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyIPAddrPtrSlice(dest, src []*IPAddr) []*IPAddr {
+ var newDest []*IPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*IPAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewIPAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteIPAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewIPAddr()
+ }
+ }
+ for i := range src {
+ CopyIPAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *IPAddr) Reset() {
+ *orig = IPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *IPAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+
+ if len(orig.IP) > 0 {
+ dest.WriteObjectField("iP")
+ dest.WriteBytes(orig.IP)
+ }
+ if orig.Zone != "" {
+ dest.WriteObjectField("zone")
+ dest.WriteString(orig.Zone)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *IPAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "iP":
+ orig.IP = iter.ReadBytes()
+ case "zone":
+ orig.Zone = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *IPAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *IPAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.IP)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Zone)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *IPAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.IP = make([]byte, length)
+ copy(orig.IP, buf[startPos:pos])
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Zone = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestIPAddr() *IPAddr {
+ orig := NewIPAddr()
+ orig.IP = []byte{1, 2, 3}
+ orig.Zone = "test_zone"
+ return orig
+}
+
+func GenTestIPAddrPtrSlice() []*IPAddr {
+ orig := make([]*IPAddr, 5)
+ orig[0] = NewIPAddr()
+ orig[1] = GenTestIPAddr()
+ orig[2] = NewIPAddr()
+ orig[3] = GenTestIPAddr()
+ orig[4] = NewIPAddr()
+ return orig
+}
+
+func GenTestIPAddrSlice() []IPAddr {
+ orig := make([]IPAddr, 5)
+ orig[1] = *GenTestIPAddr()
+ orig[3] = *GenTestIPAddr()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go
new file mode 100644
index 000000000..3208776fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go
@@ -0,0 +1,262 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type KeyValue struct {
+ Key string
+ Value AnyValue
+}
+
+var (
+ protoPoolKeyValue = sync.Pool{
+ New: func() any {
+ return &KeyValue{}
+ },
+ }
+)
+
+func NewKeyValue() *KeyValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &KeyValue{}
+ }
+ return protoPoolKeyValue.Get().(*KeyValue)
+}
+
+func DeleteKeyValue(orig *KeyValue, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteAnyValue(&orig.Value, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolKeyValue.Put(orig)
+ }
+}
+
+func CopyKeyValue(dest, src *KeyValue) *KeyValue {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewKeyValue()
+ }
+ dest.Key = src.Key
+
+ CopyAnyValue(&dest.Value, &src.Value)
+
+ return dest
+}
+
+func CopyKeyValueSlice(dest, src []KeyValue) []KeyValue {
+ var newDest []KeyValue
+ if cap(dest) < len(src) {
+ newDest = make([]KeyValue, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValue(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyKeyValue(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyKeyValuePtrSlice(dest, src []*KeyValue) []*KeyValue {
+ var newDest []*KeyValue
+ if cap(dest) < len(src) {
+ newDest = make([]*KeyValue, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValue()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValue(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValue()
+ }
+ }
+ for i := range src {
+ CopyKeyValue(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *KeyValue) Reset() {
+ *orig = KeyValue{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *KeyValue) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Key != "" {
+ dest.WriteObjectField("key")
+ dest.WriteString(orig.Key)
+ }
+ dest.WriteObjectField("value")
+ orig.Value.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *KeyValue) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "key":
+ orig.Key = iter.ReadString()
+ case "value":
+
+ orig.Value.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *KeyValue) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Key)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.Value.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *KeyValue) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Key)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Key)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = orig.Value.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *KeyValue) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Key = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Value.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestKeyValue() *KeyValue {
+ orig := NewKeyValue()
+ orig.Key = "test_key"
+ orig.Value = *GenTestAnyValue()
+ return orig
+}
+
+func GenTestKeyValuePtrSlice() []*KeyValue {
+ orig := make([]*KeyValue, 5)
+ orig[0] = NewKeyValue()
+ orig[1] = GenTestKeyValue()
+ orig[2] = NewKeyValue()
+ orig[3] = GenTestKeyValue()
+ orig[4] = NewKeyValue()
+ return orig
+}
+
+func GenTestKeyValueSlice() []KeyValue {
+ orig := make([]KeyValue, 5)
+ orig[1] = *GenTestKeyValue()
+ orig[3] = *GenTestKeyValue()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go
new file mode 100644
index 000000000..14c2e763d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go
@@ -0,0 +1,291 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// KeyValueAndUnit represents a custom 'dictionary native'
+// style of encoding attributes which is more convenient
+// for profiles than opentelemetry.proto.common.v1.KeyValue.
+type KeyValueAndUnit struct {
+ KeyStrindex int32
+ Value AnyValue
+ UnitStrindex int32
+}
+
+var (
+ protoPoolKeyValueAndUnit = sync.Pool{
+ New: func() any {
+ return &KeyValueAndUnit{}
+ },
+ }
+)
+
+func NewKeyValueAndUnit() *KeyValueAndUnit {
+ if !UseProtoPooling.IsEnabled() {
+ return &KeyValueAndUnit{}
+ }
+ return protoPoolKeyValueAndUnit.Get().(*KeyValueAndUnit)
+}
+
+func DeleteKeyValueAndUnit(orig *KeyValueAndUnit, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteAnyValue(&orig.Value, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolKeyValueAndUnit.Put(orig)
+ }
+}
+
+func CopyKeyValueAndUnit(dest, src *KeyValueAndUnit) *KeyValueAndUnit {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewKeyValueAndUnit()
+ }
+ dest.KeyStrindex = src.KeyStrindex
+
+ CopyAnyValue(&dest.Value, &src.Value)
+
+ dest.UnitStrindex = src.UnitStrindex
+
+ return dest
+}
+
+func CopyKeyValueAndUnitSlice(dest, src []KeyValueAndUnit) []KeyValueAndUnit {
+ var newDest []KeyValueAndUnit
+ if cap(dest) < len(src) {
+ newDest = make([]KeyValueAndUnit, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueAndUnit(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyKeyValueAndUnit(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyKeyValueAndUnitPtrSlice(dest, src []*KeyValueAndUnit) []*KeyValueAndUnit {
+ var newDest []*KeyValueAndUnit
+ if cap(dest) < len(src) {
+ newDest = make([]*KeyValueAndUnit, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueAndUnit()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueAndUnit(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueAndUnit()
+ }
+ }
+ for i := range src {
+ CopyKeyValueAndUnit(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *KeyValueAndUnit) Reset() {
+ *orig = KeyValueAndUnit{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *KeyValueAndUnit) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.KeyStrindex != int32(0) {
+ dest.WriteObjectField("keyStrindex")
+ dest.WriteInt32(orig.KeyStrindex)
+ }
+ dest.WriteObjectField("value")
+ orig.Value.MarshalJSON(dest)
+ if orig.UnitStrindex != int32(0) {
+ dest.WriteObjectField("unitStrindex")
+ dest.WriteInt32(orig.UnitStrindex)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *KeyValueAndUnit) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "keyStrindex", "key_strindex":
+ orig.KeyStrindex = iter.ReadInt32()
+ case "value":
+
+ orig.Value.UnmarshalJSON(iter)
+ case "unitStrindex", "unit_strindex":
+ orig.UnitStrindex = iter.ReadInt32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *KeyValueAndUnit) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.KeyStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.KeyStrindex))
+ }
+ l = orig.Value.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.UnitStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.UnitStrindex))
+ }
+ return n
+}
+
+func (orig *KeyValueAndUnit) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.KeyStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.KeyStrindex))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = orig.Value.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ if orig.UnitStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
+ pos--
+ buf[pos] = 0x18
+ }
+ return len(buf) - pos
+}
+
+func (orig *KeyValueAndUnit) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.KeyStrindex = int32(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Value.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.UnitStrindex = int32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestKeyValueAndUnit() *KeyValueAndUnit {
+ orig := NewKeyValueAndUnit()
+ orig.KeyStrindex = int32(13)
+ orig.Value = *GenTestAnyValue()
+ orig.UnitStrindex = int32(13)
+ return orig
+}
+
+func GenTestKeyValueAndUnitPtrSlice() []*KeyValueAndUnit {
+ orig := make([]*KeyValueAndUnit, 5)
+ orig[0] = NewKeyValueAndUnit()
+ orig[1] = GenTestKeyValueAndUnit()
+ orig[2] = NewKeyValueAndUnit()
+ orig[3] = GenTestKeyValueAndUnit()
+ orig[4] = NewKeyValueAndUnit()
+ return orig
+}
+
+func GenTestKeyValueAndUnitSlice() []KeyValueAndUnit {
+ orig := make([]KeyValueAndUnit, 5)
+ orig[1] = *GenTestKeyValueAndUnit()
+ orig[3] = *GenTestKeyValueAndUnit()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go
new file mode 100644
index 000000000..1cb1fef73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message since oneof in AnyValue does not allow repeated fields.
+type KeyValueList struct {
+ Values []KeyValue
+}
+
+var (
+ protoPoolKeyValueList = sync.Pool{
+ New: func() any {
+ return &KeyValueList{}
+ },
+ }
+)
+
+func NewKeyValueList() *KeyValueList {
+ if !UseProtoPooling.IsEnabled() {
+ return &KeyValueList{}
+ }
+ return protoPoolKeyValueList.Get().(*KeyValueList)
+}
+
+func DeleteKeyValueList(orig *KeyValueList, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Values {
+ DeleteKeyValue(&orig.Values[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolKeyValueList.Put(orig)
+ }
+}
+
+func CopyKeyValueList(dest, src *KeyValueList) *KeyValueList {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewKeyValueList()
+ }
+ dest.Values = CopyKeyValueSlice(dest.Values, src.Values)
+
+ return dest
+}
+
+func CopyKeyValueListSlice(dest, src []KeyValueList) []KeyValueList {
+ var newDest []KeyValueList
+ if cap(dest) < len(src) {
+ newDest = make([]KeyValueList, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueList(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyKeyValueList(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyKeyValueListPtrSlice(dest, src []*KeyValueList) []*KeyValueList {
+ var newDest []*KeyValueList
+ if cap(dest) < len(src) {
+ newDest = make([]*KeyValueList, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueList()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueList(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueList()
+ }
+ }
+ for i := range src {
+ CopyKeyValueList(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *KeyValueList) Reset() {
+ *orig = KeyValueList{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *KeyValueList) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Values) > 0 {
+ dest.WriteObjectField("values")
+ dest.WriteArrayStart()
+ orig.Values[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Values); i++ {
+ dest.WriteMore()
+ orig.Values[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *KeyValueList) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "values":
+ for iter.ReadArray() {
+ orig.Values = append(orig.Values, KeyValue{})
+ orig.Values[len(orig.Values)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *KeyValueList) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Values {
+ l = orig.Values[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *KeyValueList) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Values) - 1; i >= 0; i-- {
+ l = orig.Values[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *KeyValueList) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Values = append(orig.Values, KeyValue{})
+ err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestKeyValueList() *KeyValueList {
+ orig := NewKeyValueList()
+ orig.Values = []KeyValue{{}, *GenTestKeyValue()}
+ return orig
+}
+
+func GenTestKeyValueListPtrSlice() []*KeyValueList {
+ orig := make([]*KeyValueList, 5)
+ orig[0] = NewKeyValueList()
+ orig[1] = GenTestKeyValueList()
+ orig[2] = NewKeyValueList()
+ orig[3] = GenTestKeyValueList()
+ orig[4] = NewKeyValueList()
+ return orig
+}
+
+func GenTestKeyValueListSlice() []KeyValueList {
+ orig := make([]KeyValueList, 5)
+ orig[1] = *GenTestKeyValueList()
+ orig[3] = *GenTestKeyValueList()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_line.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go
similarity index 56%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_line.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go
index 4b455cfb1..f9cde7b96 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_line.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go
@@ -10,27 +10,33 @@ import (
"fmt"
"sync"
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// Line details a specific line in a source code, linked to a function.
+type Line struct {
+ FunctionIndex int32
+ Line int64
+ Column int64
+}
+
var (
protoPoolLine = sync.Pool{
New: func() any {
- return &otlpprofiles.Line{}
+ return &Line{}
},
}
)
-func NewOrigLine() *otlpprofiles.Line {
+func NewLine() *Line {
if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.Line{}
+ return &Line{}
}
- return protoPoolLine.Get().(*otlpprofiles.Line)
+ return protoPoolLine.Get().(*Line)
}
-func DeleteOrigLine(orig *otlpprofiles.Line, nullable bool) {
+func DeleteLine(orig *Line, nullable bool) {
if orig == nil {
return
}
@@ -46,26 +52,82 @@ func DeleteOrigLine(orig *otlpprofiles.Line, nullable bool) {
}
}
-func CopyOrigLine(dest, src *otlpprofiles.Line) {
+func CopyLine(dest, src *Line) *Line {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLine()
}
dest.FunctionIndex = src.FunctionIndex
+
dest.Line = src.Line
+
dest.Column = src.Column
+
+ return dest
}
-func GenTestOrigLine() *otlpprofiles.Line {
- orig := NewOrigLine()
- orig.FunctionIndex = int32(13)
- orig.Line = int64(13)
- orig.Column = int64(13)
- return orig
+func CopyLineSlice(dest, src []Line) []Line {
+ var newDest []Line
+ if cap(dest) < len(src) {
+ newDest = make([]Line, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLine(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLine(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLinePtrSlice(dest, src []*Line) []*Line {
+ var newDest []*Line
+ if cap(dest) < len(src) {
+ newDest = make([]*Line, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLine()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLine(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLine()
+ }
+ }
+ for i := range src {
+ CopyLine(newDest[i], src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigLine(orig *otlpprofiles.Line, dest *json.Stream) {
+func (orig *Line) Reset() {
+ *orig = Line{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Line) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.FunctionIndex != int32(0) {
dest.WriteObjectField("functionIndex")
@@ -82,8 +144,8 @@ func MarshalJSONOrigLine(orig *otlpprofiles.Line, dest *json.Stream) {
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigLine unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigLine(orig *otlpprofiles.Line, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Line) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "functionIndex", "function_index":
@@ -98,7 +160,7 @@ func UnmarshalJSONOrigLine(orig *otlpprofiles.Line, iter *json.Iterator) {
}
}
-func SizeProtoOrigLine(orig *otlpprofiles.Line) int {
+func (orig *Line) SizeProto() int {
var n int
var l int
_ = l
@@ -114,7 +176,7 @@ func SizeProtoOrigLine(orig *otlpprofiles.Line) int {
return n
}
-func MarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) int {
+func (orig *Line) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
@@ -136,7 +198,7 @@ func MarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) int {
return len(buf) - pos
}
-func UnmarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) error {
+func (orig *Line) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -195,3 +257,28 @@ func UnmarshalProtoOrigLine(orig *otlpprofiles.Line, buf []byte) error {
}
return nil
}
+
+func GenTestLine() *Line {
+ orig := NewLine()
+ orig.FunctionIndex = int32(13)
+ orig.Line = int64(13)
+ orig.Column = int64(13)
+ return orig
+}
+
+func GenTestLinePtrSlice() []*Line {
+ orig := make([]*Line, 5)
+ orig[0] = NewLine()
+ orig[1] = GenTestLine()
+ orig[2] = NewLine()
+ orig[3] = GenTestLine()
+ orig[4] = NewLine()
+ return orig
+}
+
+func GenTestLineSlice() []Line {
+ orig := make([]Line, 5)
+ orig[1] = *GenTestLine()
+ orig[3] = *GenTestLine()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go
new file mode 100644
index 000000000..26962ad27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go
@@ -0,0 +1,267 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Link represents a pointer from a profile Sample to a trace Span.
+type Link struct {
+ TraceId TraceID
+ SpanId SpanID
+}
+
+var (
+ protoPoolLink = sync.Pool{
+ New: func() any {
+ return &Link{}
+ },
+ }
+)
+
+func NewLink() *Link {
+ if !UseProtoPooling.IsEnabled() {
+ return &Link{}
+ }
+ return protoPoolLink.Get().(*Link)
+}
+
+func DeleteLink(orig *Link, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolLink.Put(orig)
+ }
+}
+
+func CopyLink(dest, src *Link) *Link {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLink()
+ }
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ return dest
+}
+
+func CopyLinkSlice(dest, src []Link) []Link {
+ var newDest []Link
+ if cap(dest) < len(src) {
+ newDest = make([]Link, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLink(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLink(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLinkPtrSlice(dest, src []*Link) []*Link {
+ var newDest []*Link
+ if cap(dest) < len(src) {
+ newDest = make([]*Link, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLink()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLink(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLink()
+ }
+ }
+ for i := range src {
+ CopyLink(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Link) Reset() {
+ *orig = Link{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Link) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if !orig.TraceId.IsEmpty() {
+ dest.WriteObjectField("traceId")
+ orig.TraceId.MarshalJSON(dest)
+ }
+ if !orig.SpanId.IsEmpty() {
+ dest.WriteObjectField("spanId")
+ orig.SpanId.MarshalJSON(dest)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Link) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "traceId", "trace_id":
+
+ orig.TraceId.UnmarshalJSON(iter)
+ case "spanId", "span_id":
+
+ orig.SpanId.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Link) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.TraceId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *Link) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.TraceId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ l = orig.SpanId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *Link) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLink() *Link {
+ orig := NewLink()
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ return orig
+}
+
+func GenTestLinkPtrSlice() []*Link {
+ orig := make([]*Link, 5)
+ orig[0] = NewLink()
+ orig[1] = GenTestLink()
+ orig[2] = NewLink()
+ orig[3] = GenTestLink()
+ orig[4] = NewLink()
+ return orig
+}
+
+func GenTestLinkSlice() []Link {
+ orig := make([]Link, 5)
+ orig[1] = *GenTestLink()
+ orig[3] = *GenTestLink()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go
new file mode 100644
index 000000000..bceb75561
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go
@@ -0,0 +1,371 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Location describes function and line table debug information.
+type Location struct {
+ MappingIndex int32
+ Address uint64
+ Lines []*Line
+ AttributeIndices []int32
+}
+
+var (
+ protoPoolLocation = sync.Pool{
+ New: func() any {
+ return &Location{}
+ },
+ }
+)
+
+func NewLocation() *Location {
+ if !UseProtoPooling.IsEnabled() {
+ return &Location{}
+ }
+ return protoPoolLocation.Get().(*Location)
+}
+
+func DeleteLocation(orig *Location, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Lines {
+ DeleteLine(orig.Lines[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolLocation.Put(orig)
+ }
+}
+
+func CopyLocation(dest, src *Location) *Location {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLocation()
+ }
+ dest.MappingIndex = src.MappingIndex
+
+ dest.Address = src.Address
+
+ dest.Lines = CopyLinePtrSlice(dest.Lines, src.Lines)
+
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+
+ return dest
+}
+
+func CopyLocationSlice(dest, src []Location) []Location {
+ var newDest []Location
+ if cap(dest) < len(src) {
+ newDest = make([]Location, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLocation(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLocation(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLocationPtrSlice(dest, src []*Location) []*Location {
+ var newDest []*Location
+ if cap(dest) < len(src) {
+ newDest = make([]*Location, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLocation()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLocation(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLocation()
+ }
+ }
+ for i := range src {
+ CopyLocation(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Location) Reset() {
+ *orig = Location{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Location) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.MappingIndex != int32(0) {
+ dest.WriteObjectField("mappingIndex")
+ dest.WriteInt32(orig.MappingIndex)
+ }
+ if orig.Address != uint64(0) {
+ dest.WriteObjectField("address")
+ dest.WriteUint64(orig.Address)
+ }
+ if len(orig.Lines) > 0 {
+ dest.WriteObjectField("lines")
+ dest.WriteArrayStart()
+ orig.Lines[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Lines); i++ {
+ dest.WriteMore()
+ orig.Lines[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.AttributeIndices) > 0 {
+ dest.WriteObjectField("attributeIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.AttributeIndices[0])
+ for i := 1; i < len(orig.AttributeIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.AttributeIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Location) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "mappingIndex", "mapping_index":
+ orig.MappingIndex = iter.ReadInt32()
+ case "address":
+ orig.Address = iter.ReadUint64()
+ case "lines":
+ for iter.ReadArray() {
+ orig.Lines = append(orig.Lines, NewLine())
+ orig.Lines[len(orig.Lines)-1].UnmarshalJSON(iter)
+ }
+
+ case "attributeIndices", "attribute_indices":
+ for iter.ReadArray() {
+ orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Location) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.MappingIndex != 0 {
+ n += 1 + proto.Sov(uint64(orig.MappingIndex))
+ }
+ if orig.Address != 0 {
+ n += 1 + proto.Sov(uint64(orig.Address))
+ }
+ for i := range orig.Lines {
+ l = orig.Lines[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if len(orig.AttributeIndices) > 0 {
+ l = 0
+ for _, e := range orig.AttributeIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Location) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.MappingIndex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.MappingIndex))
+ pos--
+ buf[pos] = 0x8
+ }
+ if orig.Address != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Address))
+ pos--
+ buf[pos] = 0x10
+ }
+ for i := len(orig.Lines) - 1; i >= 0; i-- {
+ l = orig.Lines[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ l = len(orig.AttributeIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x22
+ }
+ return len(buf) - pos
+}
+
+func (orig *Location) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.MappingIndex = int32(num)
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Address = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lines", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Lines = append(orig.Lines, NewLine())
+ err = orig.Lines[len(orig.Lines)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ case 4:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLocation() *Location {
+ orig := NewLocation()
+ orig.MappingIndex = int32(13)
+ orig.Address = uint64(13)
+ orig.Lines = []*Line{{}, GenTestLine()}
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestLocationPtrSlice() []*Location {
+ orig := make([]*Location, 5)
+ orig[0] = NewLocation()
+ orig[1] = GenTestLocation()
+ orig[2] = NewLocation()
+ orig[3] = GenTestLocation()
+ orig[4] = NewLocation()
+ return orig
+}
+
+func GenTestLocationSlice() []Location {
+ orig := make([]Location, 5)
+ orig[1] = *GenTestLocation()
+ orig[3] = *GenTestLocation()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go
similarity index 64%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecord.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go
index a43fdb38b..8ef100c13 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecord.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go
@@ -11,29 +11,42 @@ import (
"fmt"
"sync"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.
+
+type LogRecord struct {
+ TimeUnixNano uint64
+ ObservedTimeUnixNano uint64
+ SeverityNumber SeverityNumber
+ SeverityText string
+ Body AnyValue
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ Flags uint32
+ TraceId TraceID
+ SpanId SpanID
+ EventName string
+}
+
var (
protoPoolLogRecord = sync.Pool{
New: func() any {
- return &otlplogs.LogRecord{}
+ return &LogRecord{}
},
}
)
-func NewOrigLogRecord() *otlplogs.LogRecord {
+func NewLogRecord() *LogRecord {
if !UseProtoPooling.IsEnabled() {
- return &otlplogs.LogRecord{}
+ return &LogRecord{}
}
- return protoPoolLogRecord.Get().(*otlplogs.LogRecord)
+ return protoPoolLogRecord.Get().(*LogRecord)
}
-func DeleteOrigLogRecord(orig *otlplogs.LogRecord, nullable bool) {
+func DeleteLogRecord(orig *LogRecord, nullable bool) {
if orig == nil {
return
}
@@ -43,12 +56,12 @@ func DeleteOrigLogRecord(orig *otlplogs.LogRecord, nullable bool) {
return
}
- DeleteOrigAnyValue(&orig.Body, false)
+ DeleteAnyValue(&orig.Body, false)
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
- DeleteOrigTraceID(&orig.TraceId, false)
- DeleteOrigSpanID(&orig.SpanId, false)
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
orig.Reset()
if nullable {
@@ -56,42 +69,98 @@ func DeleteOrigLogRecord(orig *otlplogs.LogRecord, nullable bool) {
}
}
-func CopyOrigLogRecord(dest, src *otlplogs.LogRecord) {
+func CopyLogRecord(dest, src *LogRecord) *LogRecord {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLogRecord()
}
dest.TimeUnixNano = src.TimeUnixNano
+
dest.ObservedTimeUnixNano = src.ObservedTimeUnixNano
+
dest.SeverityNumber = src.SeverityNumber
+
dest.SeverityText = src.SeverityText
- CopyOrigAnyValue(&dest.Body, &src.Body)
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+
+ CopyAnyValue(&dest.Body, &src.Body)
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.DroppedAttributesCount = src.DroppedAttributesCount
+
dest.Flags = src.Flags
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
+
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
dest.EventName = src.EventName
+
+ return dest
}
-func GenTestOrigLogRecord() *otlplogs.LogRecord {
- orig := NewOrigLogRecord()
- orig.TimeUnixNano = 1234567890
- orig.ObservedTimeUnixNano = 1234567890
- orig.SeverityNumber = otlplogs.SeverityNumber(5)
- orig.SeverityText = "test_severitytext"
- orig.Body = *GenTestOrigAnyValue()
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.DroppedAttributesCount = uint32(13)
- orig.Flags = 1
- orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
- orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
- orig.EventName = "test_eventname"
- return orig
+func CopyLogRecordSlice(dest, src []LogRecord) []LogRecord {
+ var newDest []LogRecord
+ if cap(dest) < len(src) {
+ newDest = make([]LogRecord, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogRecord(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLogRecord(&newDest[i], &src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) {
+func CopyLogRecordPtrSlice(dest, src []*LogRecord) []*LogRecord {
+ var newDest []*LogRecord
+ if cap(dest) < len(src) {
+ newDest = make([]*LogRecord, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogRecord()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogRecord(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogRecord()
+ }
+ }
+ for i := range src {
+ CopyLogRecord(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *LogRecord) Reset() {
+ *orig = LogRecord{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *LogRecord) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
@@ -111,14 +180,14 @@ func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) {
dest.WriteString(orig.SeverityText)
}
dest.WriteObjectField("body")
- MarshalJSONOrigAnyValue(&orig.Body, dest)
+ orig.Body.MarshalJSON(dest)
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -130,13 +199,13 @@ func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
- if orig.TraceId != data.TraceID([16]byte{}) {
+ if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
- MarshalJSONOrigTraceID(&orig.TraceId, dest)
+ orig.TraceId.MarshalJSON(dest)
}
- if orig.SpanId != data.SpanID([8]byte{}) {
+ if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
- MarshalJSONOrigSpanID(&orig.SpanId, dest)
+ orig.SpanId.MarshalJSON(dest)
}
if orig.EventName != "" {
dest.WriteObjectField("eventName")
@@ -145,8 +214,8 @@ func MarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) {
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigLogRecord unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *LogRecord) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "timeUnixNano", "time_unix_nano":
@@ -154,15 +223,16 @@ func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) {
case "observedTimeUnixNano", "observed_time_unix_nano":
orig.ObservedTimeUnixNano = iter.ReadUint64()
case "severityNumber", "severity_number":
- orig.SeverityNumber = otlplogs.SeverityNumber(iter.ReadEnumValue(otlplogs.SeverityNumber_value))
+ orig.SeverityNumber = SeverityNumber(iter.ReadEnumValue(SeverityNumber_value))
case "severityText", "severity_text":
orig.SeverityText = iter.ReadString()
case "body":
- UnmarshalJSONOrigAnyValue(&orig.Body, iter)
+
+ orig.Body.UnmarshalJSON(iter)
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
@@ -170,9 +240,11 @@ func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) {
case "flags":
orig.Flags = iter.ReadUint32()
case "traceId", "trace_id":
- UnmarshalJSONOrigTraceID(&orig.TraceId, iter)
+
+ orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
- UnmarshalJSONOrigSpanID(&orig.SpanId, iter)
+
+ orig.SpanId.UnmarshalJSON(iter)
case "eventName", "event_name":
orig.EventName = iter.ReadString()
default:
@@ -181,7 +253,7 @@ func UnmarshalJSONOrigLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) {
}
}
-func SizeProtoOrigLogRecord(orig *otlplogs.LogRecord) int {
+func (orig *LogRecord) SizeProto() int {
var n int
var l int
_ = l
@@ -198,10 +270,10 @@ func SizeProtoOrigLogRecord(orig *otlplogs.LogRecord) int {
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
- l = SizeProtoOrigAnyValue(&orig.Body)
+ l = orig.Body.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
@@ -210,9 +282,9 @@ func SizeProtoOrigLogRecord(orig *otlplogs.LogRecord) int {
if orig.Flags != 0 {
n += 5
}
- l = SizeProtoOrigTraceID(&orig.TraceId)
+ l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
- l = SizeProtoOrigSpanID(&orig.SpanId)
+ l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.EventName)
if l > 0 {
@@ -221,7 +293,7 @@ func SizeProtoOrigLogRecord(orig *otlplogs.LogRecord) int {
return n
}
-func MarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) int {
+func (orig *LogRecord) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
@@ -250,15 +322,14 @@ func MarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) int {
pos--
buf[pos] = 0x1a
}
-
- l = MarshalProtoOrigAnyValue(&orig.Body, buf[:pos])
+ l = orig.Body.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -275,14 +346,13 @@ func MarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) int {
pos--
buf[pos] = 0x45
}
-
- l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos])
+ l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
- l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos])
+ l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -299,7 +369,7 @@ func MarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) int {
return len(buf) - pos
}
-func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
+func (orig *LogRecord) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -348,7 +418,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
return err
}
- orig.SeverityNumber = otlplogs.SeverityNumber(num)
+ orig.SeverityNumber = SeverityNumber(num)
case 3:
if wireType != proto.WireTypeLen {
@@ -373,7 +443,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigAnyValue(&orig.Body, buf[startPos:pos])
+ err = orig.Body.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -388,8 +458,8 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -429,7 +499,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos])
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -445,7 +515,7 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos])
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -470,3 +540,36 @@ func UnmarshalProtoOrigLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
}
return nil
}
+
+func GenTestLogRecord() *LogRecord {
+ orig := NewLogRecord()
+ orig.TimeUnixNano = uint64(13)
+ orig.ObservedTimeUnixNano = uint64(13)
+ orig.SeverityNumber = SeverityNumber(13)
+ orig.SeverityText = "test_severitytext"
+ orig.Body = *GenTestAnyValue()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.Flags = uint32(13)
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ orig.EventName = "test_eventname"
+ return orig
+}
+
+func GenTestLogRecordPtrSlice() []*LogRecord {
+ orig := make([]*LogRecord, 5)
+ orig[0] = NewLogRecord()
+ orig[1] = GenTestLogRecord()
+ orig[2] = NewLogRecord()
+ orig[3] = GenTestLogRecord()
+ orig[4] = NewLogRecord()
+ return orig
+}
+
+func GenTestLogRecordSlice() []LogRecord {
+ orig := make([]LogRecord, 5)
+ orig[1] = *GenTestLogRecord()
+ orig[3] = *GenTestLogRecord()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go
new file mode 100644
index 000000000..8b7cf668f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go
@@ -0,0 +1,247 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// LogsData represents the logs data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP logs data but do not
+// implement the OTLP protocol.
+type LogsData struct {
+ ResourceLogs []*ResourceLogs
+}
+
+var (
+ protoPoolLogsData = sync.Pool{
+ New: func() any {
+ return &LogsData{}
+ },
+ }
+)
+
+func NewLogsData() *LogsData {
+ if !UseProtoPooling.IsEnabled() {
+ return &LogsData{}
+ }
+ return protoPoolLogsData.Get().(*LogsData)
+}
+
+func DeleteLogsData(orig *LogsData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceLogs {
+ DeleteResourceLogs(orig.ResourceLogs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolLogsData.Put(orig)
+ }
+}
+
+func CopyLogsData(dest, src *LogsData) *LogsData {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLogsData()
+ }
+ dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs)
+
+ return dest
+}
+
+func CopyLogsDataSlice(dest, src []LogsData) []LogsData {
+ var newDest []LogsData
+ if cap(dest) < len(src) {
+ newDest = make([]LogsData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLogsData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLogsDataPtrSlice(dest, src []*LogsData) []*LogsData {
+ var newDest []*LogsData
+ if cap(dest) < len(src) {
+ newDest = make([]*LogsData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+	// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsData()
+ }
+ }
+ for i := range src {
+ CopyLogsData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *LogsData) Reset() {
+ *orig = LogsData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *LogsData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceLogs) > 0 {
+ dest.WriteObjectField("resourceLogs")
+ dest.WriteArrayStart()
+ orig.ResourceLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceLogs); i++ {
+ dest.WriteMore()
+ orig.ResourceLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *LogsData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceLogs", "resource_logs":
+ for iter.ReadArray() {
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *LogsData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceLogs {
+ l = orig.ResourceLogs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *LogsData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
+ l = orig.ResourceLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *LogsData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLogsData() *LogsData {
+ orig := NewLogsData()
+ orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()}
+ return orig
+}
+
+func GenTestLogsDataPtrSlice() []*LogsData {
+ orig := make([]*LogsData, 5)
+ orig[0] = NewLogsData()
+ orig[1] = GenTestLogsData()
+ orig[2] = NewLogsData()
+ orig[3] = GenTestLogsData()
+ orig[4] = NewLogsData()
+ return orig
+}
+
+func GenTestLogsDataSlice() []LogsData {
+ orig := make([]LogsData, 5)
+ orig[1] = *GenTestLogsData()
+ orig[3] = *GenTestLogsData()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go
new file mode 100644
index 000000000..60bae8a78
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type LogsRequest struct {
+ RequestContext *RequestContext
+ LogsData LogsData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolLogsRequest = sync.Pool{
+ New: func() any {
+ return &LogsRequest{}
+ },
+ }
+)
+
+func NewLogsRequest() *LogsRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &LogsRequest{}
+ }
+ return protoPoolLogsRequest.Get().(*LogsRequest)
+}
+
+func DeleteLogsRequest(orig *LogsRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteLogsData(&orig.LogsData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolLogsRequest.Put(orig)
+ }
+}
+
+func CopyLogsRequest(dest, src *LogsRequest) *LogsRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLogsRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyLogsData(&dest.LogsData, &src.LogsData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyLogsRequestSlice(dest, src []LogsRequest) []LogsRequest {
+ var newDest []LogsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]LogsRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLogsRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLogsRequestPtrSlice(dest, src []*LogsRequest) []*LogsRequest {
+ var newDest []*LogsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*LogsRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+	// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsRequest()
+ }
+ }
+ for i := range src {
+ CopyLogsRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *LogsRequest) Reset() {
+ *orig = LogsRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *LogsRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("logsData")
+ orig.LogsData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *LogsRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "logsData", "logs_data":
+
+ orig.LogsData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *LogsRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.LogsData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *LogsRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.LogsData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *LogsRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogsData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.LogsData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLogsRequest() *LogsRequest {
+ orig := NewLogsRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.LogsData = *GenTestLogsData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestLogsRequestPtrSlice() []*LogsRequest {
+ orig := make([]*LogsRequest, 5)
+ orig[0] = NewLogsRequest()
+ orig[1] = GenTestLogsRequest()
+ orig[2] = NewLogsRequest()
+ orig[3] = GenTestLogsRequest()
+ orig[4] = NewLogsRequest()
+ return orig
+}
+
+func GenTestLogsRequestSlice() []LogsRequest {
+ orig := make([]LogsRequest, 5)
+ orig[1] = *GenTestLogsRequest()
+ orig[3] = *GenTestLogsRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mapping.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go
similarity index 55%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mapping.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go
index eb4ba35a9..41b746247 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mapping.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go
@@ -10,27 +10,35 @@ import (
"fmt"
"sync"
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID
+type Mapping struct {
+ MemoryStart uint64
+ MemoryLimit uint64
+ FileOffset uint64
+ FilenameStrindex int32
+ AttributeIndices []int32
+}
+
var (
protoPoolMapping = sync.Pool{
New: func() any {
- return &otlpprofiles.Mapping{}
+ return &Mapping{}
},
}
)
-func NewOrigMapping() *otlpprofiles.Mapping {
+func NewMapping() *Mapping {
if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.Mapping{}
+ return &Mapping{}
}
- return protoPoolMapping.Get().(*otlpprofiles.Mapping)
+ return protoPoolMapping.Get().(*Mapping)
}
-func DeleteOrigMapping(orig *otlpprofiles.Mapping, nullable bool) {
+func DeleteMapping(orig *Mapping, nullable bool) {
if orig == nil {
return
}
@@ -46,38 +54,86 @@ func DeleteOrigMapping(orig *otlpprofiles.Mapping, nullable bool) {
}
}
-func CopyOrigMapping(dest, src *otlpprofiles.Mapping) {
+func CopyMapping(dest, src *Mapping) *Mapping {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMapping()
}
dest.MemoryStart = src.MemoryStart
+
dest.MemoryLimit = src.MemoryLimit
+
dest.FileOffset = src.FileOffset
+
dest.FilenameStrindex = src.FilenameStrindex
- dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices)
- dest.HasFunctions = src.HasFunctions
- dest.HasFilenames = src.HasFilenames
- dest.HasLineNumbers = src.HasLineNumbers
- dest.HasInlineFrames = src.HasInlineFrames
+
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+
+ return dest
}
-func GenTestOrigMapping() *otlpprofiles.Mapping {
- orig := NewOrigMapping()
- orig.MemoryStart = uint64(13)
- orig.MemoryLimit = uint64(13)
- orig.FileOffset = uint64(13)
- orig.FilenameStrindex = int32(13)
- orig.AttributeIndices = GenerateOrigTestInt32Slice()
- orig.HasFunctions = true
- orig.HasFilenames = true
- orig.HasLineNumbers = true
- orig.HasInlineFrames = true
- return orig
+func CopyMappingSlice(dest, src []Mapping) []Mapping {
+ var newDest []Mapping
+ if cap(dest) < len(src) {
+ newDest = make([]Mapping, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMapping(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMapping(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMappingPtrSlice(dest, src []*Mapping) []*Mapping {
+ var newDest []*Mapping
+ if cap(dest) < len(src) {
+ newDest = make([]*Mapping, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+	// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMapping()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMapping(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMapping()
+ }
+ }
+ for i := range src {
+ CopyMapping(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Mapping) Reset() {
+ *orig = Mapping{}
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigMapping(orig *otlpprofiles.Mapping, dest *json.Stream) {
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Mapping) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.MemoryStart != uint64(0) {
dest.WriteObjectField("memoryStart")
@@ -105,27 +161,11 @@ func MarshalJSONOrigMapping(orig *otlpprofiles.Mapping, dest *json.Stream) {
}
dest.WriteArrayEnd()
}
- if orig.HasFunctions != false {
- dest.WriteObjectField("hasFunctions")
- dest.WriteBool(orig.HasFunctions)
- }
- if orig.HasFilenames != false {
- dest.WriteObjectField("hasFilenames")
- dest.WriteBool(orig.HasFilenames)
- }
- if orig.HasLineNumbers != false {
- dest.WriteObjectField("hasLineNumbers")
- dest.WriteBool(orig.HasLineNumbers)
- }
- if orig.HasInlineFrames != false {
- dest.WriteObjectField("hasInlineFrames")
- dest.WriteBool(orig.HasInlineFrames)
- }
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigMapping unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigMapping(orig *otlpprofiles.Mapping, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Mapping) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "memoryStart", "memory_start":
@@ -141,21 +181,13 @@ func UnmarshalJSONOrigMapping(orig *otlpprofiles.Mapping, iter *json.Iterator) {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
- case "hasFunctions", "has_functions":
- orig.HasFunctions = iter.ReadBool()
- case "hasFilenames", "has_filenames":
- orig.HasFilenames = iter.ReadBool()
- case "hasLineNumbers", "has_line_numbers":
- orig.HasLineNumbers = iter.ReadBool()
- case "hasInlineFrames", "has_inline_frames":
- orig.HasInlineFrames = iter.ReadBool()
default:
iter.Skip()
}
}
}
-func SizeProtoOrigMapping(orig *otlpprofiles.Mapping) int {
+func (orig *Mapping) SizeProto() int {
var n int
var l int
_ = l
@@ -178,22 +210,10 @@ func SizeProtoOrigMapping(orig *otlpprofiles.Mapping) int {
}
n += 1 + proto.Sov(uint64(l)) + l
}
- if orig.HasFunctions {
- n += 2
- }
- if orig.HasFilenames {
- n += 2
- }
- if orig.HasLineNumbers {
- n += 2
- }
- if orig.HasInlineFrames {
- n += 2
- }
return n
}
-func MarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) int {
+func (orig *Mapping) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
@@ -227,50 +247,10 @@ func MarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) int {
pos--
buf[pos] = 0x2a
}
- if orig.HasFunctions {
- pos--
- if orig.HasFunctions {
- buf[pos] = 1
- } else {
- buf[pos] = 0
- }
- pos--
- buf[pos] = 0x30
- }
- if orig.HasFilenames {
- pos--
- if orig.HasFilenames {
- buf[pos] = 1
- } else {
- buf[pos] = 0
- }
- pos--
- buf[pos] = 0x38
- }
- if orig.HasLineNumbers {
- pos--
- if orig.HasLineNumbers {
- buf[pos] = 1
- } else {
- buf[pos] = 0
- }
- pos--
- buf[pos] = 0x40
- }
- if orig.HasInlineFrames {
- pos--
- if orig.HasInlineFrames {
- buf[pos] = 1
- } else {
- buf[pos] = 0
- }
- pos--
- buf[pos] = 0x48
- }
return len(buf) - pos
}
-func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error {
+func (orig *Mapping) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -333,74 +313,35 @@ func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error {
orig.FilenameStrindex = int32(num)
case 5:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
- }
-
- case 6:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field HasFunctions", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.HasFunctions = num != 0
-
- case 7:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field HasFilenames", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.HasFilenames = num != 0
-
- case 8:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field HasLineNumbers", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.HasLineNumbers = num != 0
-
- case 9:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field HasInlineFrames", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.HasInlineFrames = num != 0
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
@@ -410,3 +351,30 @@ func UnmarshalProtoOrigMapping(orig *otlpprofiles.Mapping, buf []byte) error {
}
return nil
}
+
+func GenTestMapping() *Mapping {
+ orig := NewMapping()
+ orig.MemoryStart = uint64(13)
+ orig.MemoryLimit = uint64(13)
+ orig.FileOffset = uint64(13)
+ orig.FilenameStrindex = int32(13)
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestMappingPtrSlice() []*Mapping {
+ orig := make([]*Mapping, 5)
+ orig[0] = NewMapping()
+ orig[1] = GenTestMapping()
+ orig[2] = NewMapping()
+ orig[3] = GenTestMapping()
+ orig[4] = NewMapping()
+ return orig
+}
+
+func GenTestMappingSlice() []Mapping {
+ orig := make([]Mapping, 5)
+ orig[1] = *GenTestMapping()
+ orig[3] = *GenTestMapping()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go
new file mode 100644
index 000000000..b7f85b27a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go
@@ -0,0 +1,804 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *Metric) GetData() any {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type Metric_Gauge struct {
+ Gauge *Gauge
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if v, ok := m.GetData().(*Metric_Gauge); ok {
+ return v.Gauge
+ }
+ return nil
+}
+
+type Metric_Sum struct {
+ Sum *Sum
+}
+
+func (m *Metric) GetSum() *Sum {
+ if v, ok := m.GetData().(*Metric_Sum); ok {
+ return v.Sum
+ }
+ return nil
+}
+
+type Metric_Histogram struct {
+ Histogram *Histogram
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if v, ok := m.GetData().(*Metric_Histogram); ok {
+ return v.Histogram
+ }
+ return nil
+}
+
+type Metric_ExponentialHistogram struct {
+ ExponentialHistogram *ExponentialHistogram
+}
+
+func (m *Metric) GetExponentialHistogram() *ExponentialHistogram {
+ if v, ok := m.GetData().(*Metric_ExponentialHistogram); ok {
+ return v.ExponentialHistogram
+ }
+ return nil
+}
+
+type Metric_Summary struct {
+ Summary *Summary
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if v, ok := m.GetData().(*Metric_Summary); ok {
+ return v.Summary
+ }
+ return nil
+}
+
+// Metric represents one metric as a collection of datapoints.
+// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto
+type Metric struct {
+ Name string
+ Description string
+ Unit string
+ Data any
+ Metadata []KeyValue
+}
+
+var (
+ protoPoolMetric = sync.Pool{
+ New: func() any {
+ return &Metric{}
+ },
+ }
+
+ ProtoPoolMetric_Gauge = sync.Pool{
+ New: func() any {
+ return &Metric_Gauge{}
+ },
+ }
+
+ ProtoPoolMetric_Sum = sync.Pool{
+ New: func() any {
+ return &Metric_Sum{}
+ },
+ }
+
+ ProtoPoolMetric_Histogram = sync.Pool{
+ New: func() any {
+ return &Metric_Histogram{}
+ },
+ }
+
+ ProtoPoolMetric_ExponentialHistogram = sync.Pool{
+ New: func() any {
+ return &Metric_ExponentialHistogram{}
+ },
+ }
+
+ ProtoPoolMetric_Summary = sync.Pool{
+ New: func() any {
+ return &Metric_Summary{}
+ },
+ }
+)
+
+func NewMetric() *Metric {
+ if !UseProtoPooling.IsEnabled() {
+ return &Metric{}
+ }
+ return protoPoolMetric.Get().(*Metric)
+}
+
+func DeleteMetric(orig *Metric, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ switch ov := orig.Data.(type) {
+ case *Metric_Gauge:
+ DeleteGauge(ov.Gauge, true)
+ ov.Gauge = nil
+ ProtoPoolMetric_Gauge.Put(ov)
+ case *Metric_Sum:
+ DeleteSum(ov.Sum, true)
+ ov.Sum = nil
+ ProtoPoolMetric_Sum.Put(ov)
+ case *Metric_Histogram:
+ DeleteHistogram(ov.Histogram, true)
+ ov.Histogram = nil
+ ProtoPoolMetric_Histogram.Put(ov)
+ case *Metric_ExponentialHistogram:
+ DeleteExponentialHistogram(ov.ExponentialHistogram, true)
+ ov.ExponentialHistogram = nil
+ ProtoPoolMetric_ExponentialHistogram.Put(ov)
+ case *Metric_Summary:
+ DeleteSummary(ov.Summary, true)
+ ov.Summary = nil
+ ProtoPoolMetric_Summary.Put(ov)
+
+ }
+ for i := range orig.Metadata {
+ DeleteKeyValue(&orig.Metadata[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolMetric.Put(orig)
+ }
+}
+
+func CopyMetric(dest, src *Metric) *Metric {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMetric()
+ }
+ dest.Name = src.Name
+
+ dest.Description = src.Description
+
+ dest.Unit = src.Unit
+
+ switch t := src.Data.(type) {
+ case *Metric_Gauge:
+ var ov *Metric_Gauge
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Gauge{}
+ } else {
+ ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
+ }
+ ov.Gauge = NewGauge()
+ CopyGauge(ov.Gauge, t.Gauge)
+ dest.Data = ov
+
+ case *Metric_Sum:
+ var ov *Metric_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Sum{}
+ } else {
+ ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
+ }
+ ov.Sum = NewSum()
+ CopySum(ov.Sum, t.Sum)
+ dest.Data = ov
+
+ case *Metric_Histogram:
+ var ov *Metric_Histogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Histogram{}
+ } else {
+ ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
+ }
+ ov.Histogram = NewHistogram()
+ CopyHistogram(ov.Histogram, t.Histogram)
+ dest.Data = ov
+
+ case *Metric_ExponentialHistogram:
+ var ov *Metric_ExponentialHistogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_ExponentialHistogram{}
+ } else {
+ ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
+ }
+ ov.ExponentialHistogram = NewExponentialHistogram()
+ CopyExponentialHistogram(ov.ExponentialHistogram, t.ExponentialHistogram)
+ dest.Data = ov
+
+ case *Metric_Summary:
+ var ov *Metric_Summary
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Summary{}
+ } else {
+ ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
+ }
+ ov.Summary = NewSummary()
+ CopySummary(ov.Summary, t.Summary)
+ dest.Data = ov
+
+ default:
+ dest.Data = nil
+ }
+ dest.Metadata = CopyKeyValueSlice(dest.Metadata, src.Metadata)
+
+ return dest
+}
+
+func CopyMetricSlice(dest, src []Metric) []Metric {
+ var newDest []Metric
+ if cap(dest) < len(src) {
+ newDest = make([]Metric, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetric(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMetric(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMetricPtrSlice(dest, src []*Metric) []*Metric {
+ var newDest []*Metric
+ if cap(dest) < len(src) {
+ newDest = make([]*Metric, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+	// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetric()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetric(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetric()
+ }
+ }
+ for i := range src {
+ CopyMetric(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Metric) Reset() {
+ *orig = Metric{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Metric) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+ if orig.Description != "" {
+ dest.WriteObjectField("description")
+ dest.WriteString(orig.Description)
+ }
+ if orig.Unit != "" {
+ dest.WriteObjectField("unit")
+ dest.WriteString(orig.Unit)
+ }
+ switch orig := orig.Data.(type) {
+ case *Metric_Gauge:
+ if orig.Gauge != nil {
+ dest.WriteObjectField("gauge")
+ orig.Gauge.MarshalJSON(dest)
+ }
+ case *Metric_Sum:
+ if orig.Sum != nil {
+ dest.WriteObjectField("sum")
+ orig.Sum.MarshalJSON(dest)
+ }
+ case *Metric_Histogram:
+ if orig.Histogram != nil {
+ dest.WriteObjectField("histogram")
+ orig.Histogram.MarshalJSON(dest)
+ }
+ case *Metric_ExponentialHistogram:
+ if orig.ExponentialHistogram != nil {
+ dest.WriteObjectField("exponentialHistogram")
+ orig.ExponentialHistogram.MarshalJSON(dest)
+ }
+ case *Metric_Summary:
+ if orig.Summary != nil {
+ dest.WriteObjectField("summary")
+ orig.Summary.MarshalJSON(dest)
+ }
+ }
+ if len(orig.Metadata) > 0 {
+ dest.WriteObjectField("metadata")
+ dest.WriteArrayStart()
+ orig.Metadata[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Metadata); i++ {
+ dest.WriteMore()
+ orig.Metadata[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Metric) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "name":
+ orig.Name = iter.ReadString()
+ case "description":
+ orig.Description = iter.ReadString()
+ case "unit":
+ orig.Unit = iter.ReadString()
+
+ case "gauge":
+ {
+ var ov *Metric_Gauge
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Gauge{}
+ } else {
+ ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
+ }
+ ov.Gauge = NewGauge()
+ ov.Gauge.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "sum":
+ {
+ var ov *Metric_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Sum{}
+ } else {
+ ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
+ }
+ ov.Sum = NewSum()
+ ov.Sum.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "histogram":
+ {
+ var ov *Metric_Histogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Histogram{}
+ } else {
+ ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
+ }
+ ov.Histogram = NewHistogram()
+ ov.Histogram.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "exponentialHistogram", "exponential_histogram":
+ {
+ var ov *Metric_ExponentialHistogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_ExponentialHistogram{}
+ } else {
+ ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
+ }
+ ov.ExponentialHistogram = NewExponentialHistogram()
+ ov.ExponentialHistogram.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "summary":
+ {
+ var ov *Metric_Summary
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Summary{}
+ } else {
+ ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
+ }
+ ov.Summary = NewSummary()
+ ov.Summary.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "metadata":
+ for iter.ReadArray() {
+ orig.Metadata = append(orig.Metadata, KeyValue{})
+ orig.Metadata[len(orig.Metadata)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Metric) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Description)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Unit)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ switch orig := orig.Data.(type) {
+ case nil:
+ _ = orig
+ break
+ case *Metric_Gauge:
+ if orig.Gauge != nil {
+ l = orig.Gauge.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_Sum:
+ if orig.Sum != nil {
+ l = orig.Sum.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_Histogram:
+ if orig.Histogram != nil {
+ l = orig.Histogram.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_ExponentialHistogram:
+ if orig.ExponentialHistogram != nil {
+ l = orig.ExponentialHistogram.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_Summary:
+ if orig.Summary != nil {
+ l = orig.Summary.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ }
+ for i := range orig.Metadata {
+ l = orig.Metadata[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Metric) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Description)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Description)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.Unit)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Unit)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ switch orig := orig.Data.(type) {
+ case *Metric_Gauge:
+ if orig.Gauge != nil {
+ l = orig.Gauge.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ case *Metric_Sum:
+ if orig.Sum != nil {
+ l = orig.Sum.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+ }
+ case *Metric_Histogram:
+ if orig.Histogram != nil {
+ l = orig.Histogram.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x4a
+ }
+ case *Metric_ExponentialHistogram:
+ if orig.ExponentialHistogram != nil {
+ l = orig.ExponentialHistogram.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x52
+ }
+ case *Metric_Summary:
+ if orig.Summary != nil {
+ l = orig.Summary.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x5a
+ }
+ }
+ for i := len(orig.Metadata) - 1; i >= 0; i-- {
+ l = orig.Metadata[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x62
+ }
+ return len(buf) - pos
+}
+
+func (orig *Metric) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Description = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Unit = string(buf[startPos:pos])
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Gauge
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Gauge{}
+ } else {
+ ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
+ }
+ ov.Gauge = NewGauge()
+ err = ov.Gauge.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Sum{}
+ } else {
+ ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
+ }
+ ov.Sum = NewSum()
+ err = ov.Sum.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 9:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Histogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Histogram{}
+ } else {
+ ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
+ }
+ ov.Histogram = NewHistogram()
+ err = ov.Histogram.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 10:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_ExponentialHistogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_ExponentialHistogram{}
+ } else {
+ ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
+ }
+ ov.ExponentialHistogram = NewExponentialHistogram()
+ err = ov.ExponentialHistogram.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 11:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Summary
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Summary{}
+ } else {
+ ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
+ }
+ ov.Summary = NewSummary()
+ err = ov.Summary.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 12:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Metadata = append(orig.Metadata, KeyValue{})
+ err = orig.Metadata[len(orig.Metadata)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestMetric() *Metric {
+ orig := NewMetric()
+ orig.Name = "test_name"
+ orig.Description = "test_description"
+ orig.Unit = "test_unit"
+ orig.Data = &Metric_Gauge{Gauge: GenTestGauge()}
+ orig.Metadata = []KeyValue{{}, *GenTestKeyValue()}
+ return orig
+}
+
+func GenTestMetricPtrSlice() []*Metric {
+ orig := make([]*Metric, 5)
+ orig[0] = NewMetric()
+ orig[1] = GenTestMetric()
+ orig[2] = NewMetric()
+ orig[3] = GenTestMetric()
+ orig[4] = NewMetric()
+ return orig
+}
+
+func GenTestMetricSlice() []Metric {
+ orig := make([]Metric, 5)
+ orig[1] = *GenTestMetric()
+ orig[3] = *GenTestMetric()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go
new file mode 100644
index 000000000..ad560d67b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go
@@ -0,0 +1,247 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// MetricsData represents the metrics data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP metrics data but do not
+// implement the OTLP protocol.
+type MetricsData struct {
+ ResourceMetrics []*ResourceMetrics
+}
+
+var (
+ protoPoolMetricsData = sync.Pool{
+ New: func() any {
+ return &MetricsData{}
+ },
+ }
+)
+
+func NewMetricsData() *MetricsData {
+ if !UseProtoPooling.IsEnabled() {
+ return &MetricsData{}
+ }
+ return protoPoolMetricsData.Get().(*MetricsData)
+}
+
+func DeleteMetricsData(orig *MetricsData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceMetrics {
+ DeleteResourceMetrics(orig.ResourceMetrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolMetricsData.Put(orig)
+ }
+}
+
+func CopyMetricsData(dest, src *MetricsData) *MetricsData {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMetricsData()
+ }
+ dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics)
+
+ return dest
+}
+
+func CopyMetricsDataSlice(dest, src []MetricsData) []MetricsData {
+ var newDest []MetricsData
+ if cap(dest) < len(src) {
+ newDest = make([]MetricsData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMetricsData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMetricsDataPtrSlice(dest, src []*MetricsData) []*MetricsData {
+ var newDest []*MetricsData
+ if cap(dest) < len(src) {
+ newDest = make([]*MetricsData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsData()
+ }
+ }
+ for i := range src {
+ CopyMetricsData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *MetricsData) Reset() {
+ *orig = MetricsData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *MetricsData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceMetrics) > 0 {
+ dest.WriteObjectField("resourceMetrics")
+ dest.WriteArrayStart()
+ orig.ResourceMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceMetrics); i++ {
+ dest.WriteMore()
+ orig.ResourceMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *MetricsData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceMetrics", "resource_metrics":
+ for iter.ReadArray() {
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *MetricsData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceMetrics {
+ l = orig.ResourceMetrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *MetricsData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
+ l = orig.ResourceMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *MetricsData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestMetricsData() *MetricsData {
+ orig := NewMetricsData()
+ orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()}
+ return orig
+}
+
+func GenTestMetricsDataPtrSlice() []*MetricsData {
+ orig := make([]*MetricsData, 5)
+ orig[0] = NewMetricsData()
+ orig[1] = GenTestMetricsData()
+ orig[2] = NewMetricsData()
+ orig[3] = GenTestMetricsData()
+ orig[4] = NewMetricsData()
+ return orig
+}
+
+func GenTestMetricsDataSlice() []MetricsData {
+ orig := make([]MetricsData, 5)
+ orig[1] = *GenTestMetricsData()
+ orig[3] = *GenTestMetricsData()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go
new file mode 100644
index 000000000..e4cb0b38f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type MetricsRequest struct {
+ RequestContext *RequestContext
+ MetricsData MetricsData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolMetricsRequest = sync.Pool{
+ New: func() any {
+ return &MetricsRequest{}
+ },
+ }
+)
+
+func NewMetricsRequest() *MetricsRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &MetricsRequest{}
+ }
+ return protoPoolMetricsRequest.Get().(*MetricsRequest)
+}
+
+func DeleteMetricsRequest(orig *MetricsRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteMetricsData(&orig.MetricsData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolMetricsRequest.Put(orig)
+ }
+}
+
+func CopyMetricsRequest(dest, src *MetricsRequest) *MetricsRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMetricsRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyMetricsData(&dest.MetricsData, &src.MetricsData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyMetricsRequestSlice(dest, src []MetricsRequest) []MetricsRequest {
+ var newDest []MetricsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]MetricsRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMetricsRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMetricsRequestPtrSlice(dest, src []*MetricsRequest) []*MetricsRequest {
+ var newDest []*MetricsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*MetricsRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsRequest()
+ }
+ }
+ for i := range src {
+ CopyMetricsRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *MetricsRequest) Reset() {
+ *orig = MetricsRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *MetricsRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("metricsData")
+ orig.MetricsData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *MetricsRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "metricsData", "metrics_data":
+
+ orig.MetricsData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *MetricsRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.MetricsData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *MetricsRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.MetricsData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *MetricsRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field MetricsData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.MetricsData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestMetricsRequest() *MetricsRequest {
+ orig := NewMetricsRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.MetricsData = *GenTestMetricsData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestMetricsRequestPtrSlice() []*MetricsRequest {
+ orig := make([]*MetricsRequest, 5)
+ orig[0] = NewMetricsRequest()
+ orig[1] = GenTestMetricsRequest()
+ orig[2] = NewMetricsRequest()
+ orig[3] = GenTestMetricsRequest()
+ orig[4] = NewMetricsRequest()
+ return orig
+}
+
+func GenTestMetricsRequestSlice() []MetricsRequest {
+ orig := make([]MetricsRequest, 5)
+ orig[1] = *GenTestMetricsRequest()
+ orig[3] = *GenTestMetricsRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go
similarity index 52%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapoint.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go
index 348b0b73f..4b86a7f3a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go
@@ -12,40 +12,77 @@ import (
"math"
"sync"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+func (m *NumberDataPoint) GetValue() any {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type NumberDataPoint_AsDouble struct {
+ AsDouble float64
+}
+
+func (m *NumberDataPoint) GetAsDouble() float64 {
+ if v, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok {
+ return v.AsDouble
+ }
+ return float64(0)
+}
+
+type NumberDataPoint_AsInt struct {
+ AsInt int64
+}
+
+func (m *NumberDataPoint) GetAsInt() int64 {
+ if v, ok := m.GetValue().(*NumberDataPoint_AsInt); ok {
+ return v.AsInt
+ }
+ return int64(0)
+}
+
+// NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a number metric.
+type NumberDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Value any
+ Exemplars []Exemplar
+ Flags uint32
+}
+
var (
protoPoolNumberDataPoint = sync.Pool{
New: func() any {
- return &otlpmetrics.NumberDataPoint{}
+ return &NumberDataPoint{}
},
}
ProtoPoolNumberDataPoint_AsDouble = sync.Pool{
New: func() any {
- return &otlpmetrics.NumberDataPoint_AsDouble{}
+ return &NumberDataPoint_AsDouble{}
},
}
ProtoPoolNumberDataPoint_AsInt = sync.Pool{
New: func() any {
- return &otlpmetrics.NumberDataPoint_AsInt{}
+ return &NumberDataPoint_AsInt{}
},
}
)
-func NewOrigNumberDataPoint() *otlpmetrics.NumberDataPoint {
+func NewNumberDataPoint() *NumberDataPoint {
if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.NumberDataPoint{}
+ return &NumberDataPoint{}
}
- return protoPoolNumberDataPoint.Get().(*otlpmetrics.NumberDataPoint)
+ return protoPoolNumberDataPoint.Get().(*NumberDataPoint)
}
-func DeleteOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool) {
+func DeleteNumberDataPoint(orig *NumberDataPoint, nullable bool) {
if orig == nil {
return
}
@@ -56,15 +93,15 @@ func DeleteOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool)
}
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Value.(type) {
- case *otlpmetrics.NumberDataPoint_AsDouble:
+ case *NumberDataPoint_AsDouble:
if UseProtoPooling.IsEnabled() {
ov.AsDouble = float64(0)
ProtoPoolNumberDataPoint_AsDouble.Put(ov)
}
- case *otlpmetrics.NumberDataPoint_AsInt:
+ case *NumberDataPoint_AsInt:
if UseProtoPooling.IsEnabled() {
ov.AsInt = int64(0)
ProtoPoolNumberDataPoint_AsInt.Put(ov)
@@ -72,7 +109,7 @@ func DeleteOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool)
}
for i := range orig.Exemplars {
- DeleteOrigExemplar(&orig.Exemplars[i], false)
+ DeleteExemplar(&orig.Exemplars[i], false)
}
orig.Reset()
@@ -81,59 +118,116 @@ func DeleteOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool)
}
}
-func CopyOrigNumberDataPoint(dest, src *otlpmetrics.NumberDataPoint) {
+func CopyNumberDataPoint(dest, src *NumberDataPoint) *NumberDataPoint {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewNumberDataPoint()
}
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.StartTimeUnixNano = src.StartTimeUnixNano
+
dest.TimeUnixNano = src.TimeUnixNano
+
switch t := src.Value.(type) {
- case *otlpmetrics.NumberDataPoint_AsDouble:
- var ov *otlpmetrics.NumberDataPoint_AsDouble
+ case *NumberDataPoint_AsDouble:
+ var ov *NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.NumberDataPoint_AsDouble{}
+ ov = &NumberDataPoint_AsDouble{}
} else {
- ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble)
+ ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
}
ov.AsDouble = t.AsDouble
dest.Value = ov
- case *otlpmetrics.NumberDataPoint_AsInt:
- var ov *otlpmetrics.NumberDataPoint_AsInt
+ case *NumberDataPoint_AsInt:
+ var ov *NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.NumberDataPoint_AsInt{}
+ ov = &NumberDataPoint_AsInt{}
} else {
- ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt)
+ ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
}
ov.AsInt = t.AsInt
dest.Value = ov
+ default:
+ dest.Value = nil
}
- dest.Exemplars = CopyOrigExemplarSlice(dest.Exemplars, src.Exemplars)
+ dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
+
dest.Flags = src.Flags
+
+ return dest
}
-func GenTestOrigNumberDataPoint() *otlpmetrics.NumberDataPoint {
- orig := NewOrigNumberDataPoint()
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.StartTimeUnixNano = 1234567890
- orig.TimeUnixNano = 1234567890
- orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{AsDouble: float64(3.1415926)}
- orig.Exemplars = GenerateOrigTestExemplarSlice()
- orig.Flags = 1
- return orig
+func CopyNumberDataPointSlice(dest, src []NumberDataPoint) []NumberDataPoint {
+ var newDest []NumberDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]NumberDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteNumberDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyNumberDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyNumberDataPointPtrSlice(dest, src []*NumberDataPoint) []*NumberDataPoint {
+ var newDest []*NumberDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*NumberDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewNumberDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteNumberDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewNumberDataPoint()
+ }
+ }
+ for i := range src {
+ CopyNumberDataPoint(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *NumberDataPoint) Reset() {
+ *orig = NumberDataPoint{}
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, dest *json.Stream) {
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *NumberDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -146,20 +240,20 @@ func MarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, dest *jso
dest.WriteUint64(orig.TimeUnixNano)
}
switch orig := orig.Value.(type) {
- case *otlpmetrics.NumberDataPoint_AsDouble:
+ case *NumberDataPoint_AsDouble:
dest.WriteObjectField("asDouble")
dest.WriteFloat64(orig.AsDouble)
- case *otlpmetrics.NumberDataPoint_AsInt:
+ case *NumberDataPoint_AsInt:
dest.WriteObjectField("asInt")
dest.WriteInt64(orig.AsInt)
}
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
- MarshalJSONOrigExemplar(&orig.Exemplars[0], dest)
+ orig.Exemplars[0].MarshalJSON(dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
- MarshalJSONOrigExemplar(&orig.Exemplars[i], dest)
+ orig.Exemplars[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -170,14 +264,14 @@ func MarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, dest *jso
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigNumberDataPoint unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *NumberDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
@@ -187,11 +281,11 @@ func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *j
case "asDouble", "as_double":
{
- var ov *otlpmetrics.NumberDataPoint_AsDouble
+ var ov *NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.NumberDataPoint_AsDouble{}
+ ov = &NumberDataPoint_AsDouble{}
} else {
- ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble)
+ ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
}
ov.AsDouble = iter.ReadFloat64()
orig.Value = ov
@@ -199,11 +293,11 @@ func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *j
case "asInt", "as_int":
{
- var ov *otlpmetrics.NumberDataPoint_AsInt
+ var ov *NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.NumberDataPoint_AsInt{}
+ ov = &NumberDataPoint_AsInt{}
} else {
- ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt)
+ ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
}
ov.AsInt = iter.ReadInt64()
orig.Value = ov
@@ -211,8 +305,8 @@ func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *j
case "exemplars":
for iter.ReadArray() {
- orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
- UnmarshalJSONOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter)
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
}
case "flags":
@@ -223,12 +317,12 @@ func UnmarshalJSONOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *j
}
}
-func SizeProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint) int {
+func (orig *NumberDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
@@ -241,13 +335,13 @@ func SizeProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint) int {
case nil:
_ = orig
break
- case *otlpmetrics.NumberDataPoint_AsDouble:
+ case *NumberDataPoint_AsDouble:
n += 9
- case *otlpmetrics.NumberDataPoint_AsInt:
+ case *NumberDataPoint_AsInt:
n += 9
}
for i := range orig.Exemplars {
- l = SizeProtoOrigExemplar(&orig.Exemplars[i])
+ l = orig.Exemplars[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
@@ -256,12 +350,12 @@ func SizeProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint) int {
return n
}
-func MarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []byte) int {
+func (orig *NumberDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -280,13 +374,13 @@ func MarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []by
buf[pos] = 0x19
}
switch orig := orig.Value.(type) {
- case *otlpmetrics.NumberDataPoint_AsDouble:
+ case *NumberDataPoint_AsDouble:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
pos--
buf[pos] = 0x21
- case *otlpmetrics.NumberDataPoint_AsInt:
+ case *NumberDataPoint_AsInt:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
pos--
@@ -294,7 +388,7 @@ func MarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []by
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
- l = MarshalProtoOrigExemplar(&orig.Exemplars[i], buf[:pos])
+ l = orig.Exemplars[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -308,7 +402,7 @@ func MarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []by
return len(buf) - pos
}
-func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []byte) error {
+func (orig *NumberDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -333,8 +427,8 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -372,11 +466,11 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []
if err != nil {
return err
}
- var ov *otlpmetrics.NumberDataPoint_AsDouble
+ var ov *NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.NumberDataPoint_AsDouble{}
+ ov = &NumberDataPoint_AsDouble{}
} else {
- ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble)
+ ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
}
ov.AsDouble = math.Float64frombits(num)
orig.Value = ov
@@ -390,11 +484,11 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []
if err != nil {
return err
}
- var ov *otlpmetrics.NumberDataPoint_AsInt
+ var ov *NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.NumberDataPoint_AsInt{}
+ ov = &NumberDataPoint_AsInt{}
} else {
- ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt)
+ ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
}
ov.AsInt = int64(num)
orig.Value = ov
@@ -409,8 +503,8 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []
return err
}
startPos := pos - length
- orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
- err = UnmarshalProtoOrigExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos])
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -435,3 +529,31 @@ func UnmarshalProtoOrigNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []
}
return nil
}
+
+func GenTestNumberDataPoint() *NumberDataPoint {
+ orig := NewNumberDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Value = &NumberDataPoint_AsDouble{AsDouble: float64(3.1415926)}
+ orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
+ orig.Flags = uint32(13)
+ return orig
+}
+
+func GenTestNumberDataPointPtrSlice() []*NumberDataPoint {
+ orig := make([]*NumberDataPoint, 5)
+ orig[0] = NewNumberDataPoint()
+ orig[1] = GenTestNumberDataPoint()
+ orig[2] = NewNumberDataPoint()
+ orig[3] = GenTestNumberDataPoint()
+ orig[4] = NewNumberDataPoint()
+ return orig
+}
+
+func GenTestNumberDataPointSlice() []NumberDataPoint {
+ orig := make([]NumberDataPoint, 5)
+ orig[1] = *GenTestNumberDataPoint()
+ orig[3] = *GenTestNumberDataPoint()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go
new file mode 100644
index 000000000..c3a6460d2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go
@@ -0,0 +1,610 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Profile are an implementation of the pprofextended data model.
+
+type Profile struct {
+ SampleType ValueType
+ Samples []*Sample
+ TimeUnixNano uint64
+ DurationNano uint64
+ PeriodType ValueType
+ Period int64
+ ProfileId ProfileID
+ DroppedAttributesCount uint32
+ OriginalPayloadFormat string
+ OriginalPayload []byte
+ AttributeIndices []int32
+}
+
+var (
+ protoPoolProfile = sync.Pool{
+ New: func() any {
+ return &Profile{}
+ },
+ }
+)
+
+func NewProfile() *Profile {
+ if !UseProtoPooling.IsEnabled() {
+ return &Profile{}
+ }
+ return protoPoolProfile.Get().(*Profile)
+}
+
+func DeleteProfile(orig *Profile, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteValueType(&orig.SampleType, false)
+ for i := range orig.Samples {
+ DeleteSample(orig.Samples[i], true)
+ }
+ DeleteValueType(&orig.PeriodType, false)
+ DeleteProfileID(&orig.ProfileId, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfile.Put(orig)
+ }
+}
+
+func CopyProfile(dest, src *Profile) *Profile {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfile()
+ }
+ CopyValueType(&dest.SampleType, &src.SampleType)
+
+ dest.Samples = CopySamplePtrSlice(dest.Samples, src.Samples)
+
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ dest.DurationNano = src.DurationNano
+
+ CopyValueType(&dest.PeriodType, &src.PeriodType)
+
+ dest.Period = src.Period
+
+ CopyProfileID(&dest.ProfileId, &src.ProfileId)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ dest.OriginalPayloadFormat = src.OriginalPayloadFormat
+
+ dest.OriginalPayload = src.OriginalPayload
+
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+
+ return dest
+}
+
+func CopyProfileSlice(dest, src []Profile) []Profile {
+ var newDest []Profile
+ if cap(dest) < len(src) {
+ newDest = make([]Profile, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfile(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfile(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilePtrSlice(dest, src []*Profile) []*Profile {
+ var newDest []*Profile
+ if cap(dest) < len(src) {
+ newDest = make([]*Profile, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfile()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfile(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfile()
+ }
+ }
+ for i := range src {
+ CopyProfile(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Profile) Reset() {
+ *orig = Profile{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Profile) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("sampleType")
+ orig.SampleType.MarshalJSON(dest)
+ if len(orig.Samples) > 0 {
+ dest.WriteObjectField("samples")
+ dest.WriteArrayStart()
+ orig.Samples[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Samples); i++ {
+ dest.WriteMore()
+ orig.Samples[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ if orig.DurationNano != uint64(0) {
+ dest.WriteObjectField("durationNano")
+ dest.WriteUint64(orig.DurationNano)
+ }
+ dest.WriteObjectField("periodType")
+ orig.PeriodType.MarshalJSON(dest)
+ if orig.Period != int64(0) {
+ dest.WriteObjectField("period")
+ dest.WriteInt64(orig.Period)
+ }
+ if !orig.ProfileId.IsEmpty() {
+ dest.WriteObjectField("profileId")
+ orig.ProfileId.MarshalJSON(dest)
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ if orig.OriginalPayloadFormat != "" {
+ dest.WriteObjectField("originalPayloadFormat")
+ dest.WriteString(orig.OriginalPayloadFormat)
+ }
+
+ if len(orig.OriginalPayload) > 0 {
+ dest.WriteObjectField("originalPayload")
+ dest.WriteBytes(orig.OriginalPayload)
+ }
+ if len(orig.AttributeIndices) > 0 {
+ dest.WriteObjectField("attributeIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.AttributeIndices[0])
+ for i := 1; i < len(orig.AttributeIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.AttributeIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Profile) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "sampleType", "sample_type":
+
+ orig.SampleType.UnmarshalJSON(iter)
+ case "samples":
+ for iter.ReadArray() {
+ orig.Samples = append(orig.Samples, NewSample())
+ orig.Samples[len(orig.Samples)-1].UnmarshalJSON(iter)
+ }
+
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+ case "durationNano", "duration_nano":
+ orig.DurationNano = iter.ReadUint64()
+ case "periodType", "period_type":
+
+ orig.PeriodType.UnmarshalJSON(iter)
+ case "period":
+ orig.Period = iter.ReadInt64()
+ case "profileId", "profile_id":
+
+ orig.ProfileId.UnmarshalJSON(iter)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ case "originalPayloadFormat", "original_payload_format":
+ orig.OriginalPayloadFormat = iter.ReadString()
+ case "originalPayload", "original_payload":
+ orig.OriginalPayload = iter.ReadBytes()
+ case "attributeIndices", "attribute_indices":
+ for iter.ReadArray() {
+ orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Profile) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.SampleType.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Samples {
+ l = orig.Samples[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.DurationNano != 0 {
+ n += 1 + proto.Sov(uint64(orig.DurationNano))
+ }
+ l = orig.PeriodType.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.Period != 0 {
+ n += 1 + proto.Sov(uint64(orig.Period))
+ }
+ l = orig.ProfileId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ l = len(orig.OriginalPayloadFormat)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.OriginalPayload)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if len(orig.AttributeIndices) > 0 {
+ l = 0
+ for _, e := range orig.AttributeIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Profile) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.SampleType.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Samples) - 1; i >= 0; i-- {
+ l = orig.Samples[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x19
+ }
+ if orig.DurationNano != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DurationNano))
+ pos--
+ buf[pos] = 0x20
+ }
+ l = orig.PeriodType.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+
+ if orig.Period != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Period))
+ pos--
+ buf[pos] = 0x30
+ }
+ l = orig.ProfileId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x40
+ }
+ l = len(orig.OriginalPayloadFormat)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.OriginalPayloadFormat)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x4a
+ }
+ l = len(orig.OriginalPayload)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.OriginalPayload)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x52
+ }
+ l = len(orig.AttributeIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x5a
+ }
+ return len(buf) - pos
+}
+
+func (orig *Profile) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SampleType.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Samples = append(orig.Samples, NewSample())
+ err = orig.Samples[len(orig.Samples)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DurationNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DurationNano = uint64(num)
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PeriodType.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 6:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Period = int64(num)
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.ProfileId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 8:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+
+ case 9:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.OriginalPayloadFormat = string(buf[startPos:pos])
+
+ case 10:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.OriginalPayload = make([]byte, length)
+ copy(orig.OriginalPayload, buf[startPos:pos])
+ }
+ case 11:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfile() *Profile {
+ orig := NewProfile()
+ orig.SampleType = *GenTestValueType()
+ orig.Samples = []*Sample{{}, GenTestSample()}
+ orig.TimeUnixNano = uint64(13)
+ orig.DurationNano = uint64(13)
+ orig.PeriodType = *GenTestValueType()
+ orig.Period = int64(13)
+ orig.ProfileId = *GenTestProfileID()
+ orig.DroppedAttributesCount = uint32(13)
+ orig.OriginalPayloadFormat = "test_originalpayloadformat"
+ orig.OriginalPayload = []byte{1, 2, 3}
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestProfilePtrSlice() []*Profile {
+ orig := make([]*Profile, 5)
+ orig[0] = NewProfile()
+ orig[1] = GenTestProfile()
+ orig[2] = NewProfile()
+ orig[3] = GenTestProfile()
+ orig[4] = NewProfile()
+ return orig
+}
+
+func GenTestProfileSlice() []Profile {
+ orig := make([]Profile, 5)
+ orig[1] = *GenTestProfile()
+ orig[3] = *GenTestProfile()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go
new file mode 100644
index 000000000..f3d1c1a16
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go
@@ -0,0 +1,281 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ProfilesData represents the profiles data that can be stored in persistent storage,
+// OR can be embedded by other protocols that transfer OTLP profiles data but do not
+// implement the OTLP protocol.
+type ProfilesData struct {
+ ResourceProfiles []*ResourceProfiles
+ Dictionary ProfilesDictionary
+}
+
+var (
+ protoPoolProfilesData = sync.Pool{
+ New: func() any {
+ return &ProfilesData{}
+ },
+ }
+)
+
+func NewProfilesData() *ProfilesData {
+ if !UseProtoPooling.IsEnabled() {
+ return &ProfilesData{}
+ }
+ return protoPoolProfilesData.Get().(*ProfilesData)
+}
+
+func DeleteProfilesData(orig *ProfilesData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceProfiles {
+ DeleteResourceProfiles(orig.ResourceProfiles[i], true)
+ }
+ DeleteProfilesDictionary(&orig.Dictionary, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfilesData.Put(orig)
+ }
+}
+
+func CopyProfilesData(dest, src *ProfilesData) *ProfilesData {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfilesData()
+ }
+ dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles)
+
+ CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary)
+
+ return dest
+}
+
+func CopyProfilesDataSlice(dest, src []ProfilesData) []ProfilesData {
+ var newDest []ProfilesData
+ if cap(dest) < len(src) {
+ newDest = make([]ProfilesData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfilesData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilesDataPtrSlice(dest, src []*ProfilesData) []*ProfilesData {
+ var newDest []*ProfilesData
+ if cap(dest) < len(src) {
+ newDest = make([]*ProfilesData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesData()
+ }
+ }
+ for i := range src {
+ CopyProfilesData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ProfilesData) Reset() {
+ *orig = ProfilesData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceProfiles) > 0 {
+ dest.WriteObjectField("resourceProfiles")
+ dest.WriteArrayStart()
+ orig.ResourceProfiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceProfiles); i++ {
+ dest.WriteMore()
+ orig.ResourceProfiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectField("dictionary")
+ orig.Dictionary.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ProfilesData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceProfiles", "resource_profiles":
+ for iter.ReadArray() {
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "dictionary":
+
+ orig.Dictionary.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ProfilesData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceProfiles {
+ l = orig.ResourceProfiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.Dictionary.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ProfilesData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
+ l = orig.ResourceProfiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = orig.Dictionary.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *ProfilesData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Dictionary.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfilesData() *ProfilesData {
+ orig := NewProfilesData()
+ orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()}
+ orig.Dictionary = *GenTestProfilesDictionary()
+ return orig
+}
+
+func GenTestProfilesDataPtrSlice() []*ProfilesData {
+ orig := make([]*ProfilesData, 5)
+ orig[0] = NewProfilesData()
+ orig[1] = GenTestProfilesData()
+ orig[2] = NewProfilesData()
+ orig[3] = GenTestProfilesData()
+ orig[4] = NewProfilesData()
+ return orig
+}
+
+func GenTestProfilesDataSlice() []ProfilesData {
+ orig := make([]ProfilesData, 5)
+ orig[1] = *GenTestProfilesData()
+ orig[3] = *GenTestProfilesData()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go
new file mode 100644
index 000000000..af6168e11
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go
@@ -0,0 +1,537 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ProfilesDictionary is the reference table containing all data shared by profiles across the message being sent.
+type ProfilesDictionary struct {
+ MappingTable []*Mapping
+ LocationTable []*Location
+ FunctionTable []*Function
+ LinkTable []*Link
+ StringTable []string
+ AttributeTable []*KeyValueAndUnit
+ StackTable []*Stack
+}
+
+var (
+ protoPoolProfilesDictionary = sync.Pool{
+ New: func() any {
+ return &ProfilesDictionary{}
+ },
+ }
+)
+
+func NewProfilesDictionary() *ProfilesDictionary {
+ if !UseProtoPooling.IsEnabled() {
+ return &ProfilesDictionary{}
+ }
+ return protoPoolProfilesDictionary.Get().(*ProfilesDictionary)
+}
+
+func DeleteProfilesDictionary(orig *ProfilesDictionary, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.MappingTable {
+ DeleteMapping(orig.MappingTable[i], true)
+ }
+ for i := range orig.LocationTable {
+ DeleteLocation(orig.LocationTable[i], true)
+ }
+ for i := range orig.FunctionTable {
+ DeleteFunction(orig.FunctionTable[i], true)
+ }
+ for i := range orig.LinkTable {
+ DeleteLink(orig.LinkTable[i], true)
+ }
+ for i := range orig.AttributeTable {
+ DeleteKeyValueAndUnit(orig.AttributeTable[i], true)
+ }
+ for i := range orig.StackTable {
+ DeleteStack(orig.StackTable[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfilesDictionary.Put(orig)
+ }
+}
+
+func CopyProfilesDictionary(dest, src *ProfilesDictionary) *ProfilesDictionary {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfilesDictionary()
+ }
+ dest.MappingTable = CopyMappingPtrSlice(dest.MappingTable, src.MappingTable)
+
+ dest.LocationTable = CopyLocationPtrSlice(dest.LocationTable, src.LocationTable)
+
+ dest.FunctionTable = CopyFunctionPtrSlice(dest.FunctionTable, src.FunctionTable)
+
+ dest.LinkTable = CopyLinkPtrSlice(dest.LinkTable, src.LinkTable)
+
+ dest.StringTable = append(dest.StringTable[:0], src.StringTable...)
+ dest.AttributeTable = CopyKeyValueAndUnitPtrSlice(dest.AttributeTable, src.AttributeTable)
+
+ dest.StackTable = CopyStackPtrSlice(dest.StackTable, src.StackTable)
+
+ return dest
+}
+
+func CopyProfilesDictionarySlice(dest, src []ProfilesDictionary) []ProfilesDictionary {
+ var newDest []ProfilesDictionary
+ if cap(dest) < len(src) {
+ newDest = make([]ProfilesDictionary, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesDictionary(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfilesDictionary(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilesDictionaryPtrSlice(dest, src []*ProfilesDictionary) []*ProfilesDictionary {
+ var newDest []*ProfilesDictionary
+ if cap(dest) < len(src) {
+ newDest = make([]*ProfilesDictionary, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesDictionary()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesDictionary(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesDictionary()
+ }
+ }
+ for i := range src {
+ CopyProfilesDictionary(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ProfilesDictionary) Reset() {
+ *orig = ProfilesDictionary{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesDictionary) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.MappingTable) > 0 {
+ dest.WriteObjectField("mappingTable")
+ dest.WriteArrayStart()
+ orig.MappingTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.MappingTable); i++ {
+ dest.WriteMore()
+ orig.MappingTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.LocationTable) > 0 {
+ dest.WriteObjectField("locationTable")
+ dest.WriteArrayStart()
+ orig.LocationTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.LocationTable); i++ {
+ dest.WriteMore()
+ orig.LocationTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.FunctionTable) > 0 {
+ dest.WriteObjectField("functionTable")
+ dest.WriteArrayStart()
+ orig.FunctionTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.FunctionTable); i++ {
+ dest.WriteMore()
+ orig.FunctionTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.LinkTable) > 0 {
+ dest.WriteObjectField("linkTable")
+ dest.WriteArrayStart()
+ orig.LinkTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.LinkTable); i++ {
+ dest.WriteMore()
+ orig.LinkTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.StringTable) > 0 {
+ dest.WriteObjectField("stringTable")
+ dest.WriteArrayStart()
+ dest.WriteString(orig.StringTable[0])
+ for i := 1; i < len(orig.StringTable); i++ {
+ dest.WriteMore()
+ dest.WriteString(orig.StringTable[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.AttributeTable) > 0 {
+ dest.WriteObjectField("attributeTable")
+ dest.WriteArrayStart()
+ orig.AttributeTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.AttributeTable); i++ {
+ dest.WriteMore()
+ orig.AttributeTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.StackTable) > 0 {
+ dest.WriteObjectField("stackTable")
+ dest.WriteArrayStart()
+ orig.StackTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.StackTable); i++ {
+ dest.WriteMore()
+ orig.StackTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ProfilesDictionary) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "mappingTable", "mapping_table":
+ for iter.ReadArray() {
+ orig.MappingTable = append(orig.MappingTable, NewMapping())
+ orig.MappingTable[len(orig.MappingTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "locationTable", "location_table":
+ for iter.ReadArray() {
+ orig.LocationTable = append(orig.LocationTable, NewLocation())
+ orig.LocationTable[len(orig.LocationTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "functionTable", "function_table":
+ for iter.ReadArray() {
+ orig.FunctionTable = append(orig.FunctionTable, NewFunction())
+ orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "linkTable", "link_table":
+ for iter.ReadArray() {
+ orig.LinkTable = append(orig.LinkTable, NewLink())
+ orig.LinkTable[len(orig.LinkTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "stringTable", "string_table":
+ for iter.ReadArray() {
+ orig.StringTable = append(orig.StringTable, iter.ReadString())
+ }
+
+ case "attributeTable", "attribute_table":
+ for iter.ReadArray() {
+ orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
+ orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "stackTable", "stack_table":
+ for iter.ReadArray() {
+ orig.StackTable = append(orig.StackTable, NewStack())
+ orig.StackTable[len(orig.StackTable)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ProfilesDictionary) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.MappingTable {
+ l = orig.MappingTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.LocationTable {
+ l = orig.LocationTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.FunctionTable {
+ l = orig.FunctionTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.LinkTable {
+ l = orig.LinkTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for _, s := range orig.StringTable {
+ l = len(s)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.AttributeTable {
+ l = orig.AttributeTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.StackTable {
+ l = orig.StackTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ProfilesDictionary) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.MappingTable) - 1; i >= 0; i-- {
+ l = orig.MappingTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ for i := len(orig.LocationTable) - 1; i >= 0; i-- {
+ l = orig.LocationTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ for i := len(orig.FunctionTable) - 1; i >= 0; i-- {
+ l = orig.FunctionTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.LinkTable) - 1; i >= 0; i-- {
+ l = orig.LinkTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ for i := len(orig.StringTable) - 1; i >= 0; i-- {
+ l = len(orig.StringTable[i])
+ pos -= l
+ copy(buf[pos:], orig.StringTable[i])
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ for i := len(orig.AttributeTable) - 1; i >= 0; i-- {
+ l = orig.AttributeTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ for i := len(orig.StackTable) - 1; i >= 0; i-- {
+ l = orig.StackTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ProfilesDictionary) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.MappingTable = append(orig.MappingTable, NewMapping())
+ err = orig.MappingTable[len(orig.MappingTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.LocationTable = append(orig.LocationTable, NewLocation())
+ err = orig.LocationTable[len(orig.LocationTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.FunctionTable = append(orig.FunctionTable, NewFunction())
+ err = orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.LinkTable = append(orig.LinkTable, NewLink())
+ err = orig.LinkTable[len(orig.LinkTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.StringTable = append(orig.StringTable, string(buf[startPos:pos]))
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
+ err = orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field StackTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.StackTable = append(orig.StackTable, NewStack())
+ err = orig.StackTable[len(orig.StackTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfilesDictionary() *ProfilesDictionary {
+ orig := NewProfilesDictionary()
+ orig.MappingTable = []*Mapping{{}, GenTestMapping()}
+ orig.LocationTable = []*Location{{}, GenTestLocation()}
+ orig.FunctionTable = []*Function{{}, GenTestFunction()}
+ orig.LinkTable = []*Link{{}, GenTestLink()}
+ orig.StringTable = []string{"", "test_stringtable"}
+ orig.AttributeTable = []*KeyValueAndUnit{{}, GenTestKeyValueAndUnit()}
+ orig.StackTable = []*Stack{{}, GenTestStack()}
+ return orig
+}
+
+func GenTestProfilesDictionaryPtrSlice() []*ProfilesDictionary {
+ orig := make([]*ProfilesDictionary, 5)
+ orig[0] = NewProfilesDictionary()
+ orig[1] = GenTestProfilesDictionary()
+ orig[2] = NewProfilesDictionary()
+ orig[3] = GenTestProfilesDictionary()
+ orig[4] = NewProfilesDictionary()
+ return orig
+}
+
+func GenTestProfilesDictionarySlice() []ProfilesDictionary {
+ orig := make([]ProfilesDictionary, 5)
+ orig[1] = *GenTestProfilesDictionary()
+ orig[3] = *GenTestProfilesDictionary()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go
new file mode 100644
index 000000000..287b9ee4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type ProfilesRequest struct {
+ RequestContext *RequestContext
+ ProfilesData ProfilesData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolProfilesRequest = sync.Pool{
+ New: func() any {
+ return &ProfilesRequest{}
+ },
+ }
+)
+
+func NewProfilesRequest() *ProfilesRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ProfilesRequest{}
+ }
+ return protoPoolProfilesRequest.Get().(*ProfilesRequest)
+}
+
+func DeleteProfilesRequest(orig *ProfilesRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteProfilesData(&orig.ProfilesData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfilesRequest.Put(orig)
+ }
+}
+
+func CopyProfilesRequest(dest, src *ProfilesRequest) *ProfilesRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfilesRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyProfilesData(&dest.ProfilesData, &src.ProfilesData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyProfilesRequestSlice(dest, src []ProfilesRequest) []ProfilesRequest {
+ var newDest []ProfilesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ProfilesRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfilesRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilesRequestPtrSlice(dest, src []*ProfilesRequest) []*ProfilesRequest {
+ var newDest []*ProfilesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ProfilesRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesRequest()
+ }
+ }
+ for i := range src {
+ CopyProfilesRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ProfilesRequest) Reset() {
+ *orig = ProfilesRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("profilesData")
+ orig.ProfilesData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ProfilesRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "profilesData", "profiles_data":
+
+ orig.ProfilesData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ProfilesRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.ProfilesData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *ProfilesRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.ProfilesData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *ProfilesRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProfilesData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.ProfilesData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfilesRequest() *ProfilesRequest {
+ orig := NewProfilesRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.ProfilesData = *GenTestProfilesData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestProfilesRequestPtrSlice() []*ProfilesRequest {
+ orig := make([]*ProfilesRequest, 5)
+ orig[0] = NewProfilesRequest()
+ orig[1] = GenTestProfilesRequest()
+ orig[2] = NewProfilesRequest()
+ orig[3] = GenTestProfilesRequest()
+ orig[4] = NewProfilesRequest()
+ return orig
+}
+
+func GenTestProfilesRequestSlice() []ProfilesRequest {
+ orig := make([]ProfilesRequest, 5)
+ orig[1] = *GenTestProfilesRequest()
+ orig[3] = *GenTestProfilesRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go
new file mode 100644
index 000000000..4921ea97f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go
@@ -0,0 +1,654 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *RequestContext) GetClientAddress() any {
+ if m != nil {
+ return m.ClientAddress
+ }
+ return nil
+}
+
+type RequestContext_IP struct {
+ IP *IPAddr
+}
+
+func (m *RequestContext) GetIP() *IPAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_IP); ok {
+ return v.IP
+ }
+ return nil
+}
+
+type RequestContext_TCP struct {
+ TCP *TCPAddr
+}
+
+func (m *RequestContext) GetTCP() *TCPAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_TCP); ok {
+ return v.TCP
+ }
+ return nil
+}
+
+type RequestContext_UDP struct {
+ UDP *UDPAddr
+}
+
+func (m *RequestContext) GetUDP() *UDPAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_UDP); ok {
+ return v.UDP
+ }
+ return nil
+}
+
+type RequestContext_Unix struct {
+ Unix *UnixAddr
+}
+
+func (m *RequestContext) GetUnix() *UnixAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_Unix); ok {
+ return v.Unix
+ }
+ return nil
+}
+
+type RequestContext struct {
+ SpanContext *SpanContext
+ ClientMetadata []KeyValue
+ ClientAddress any
+}
+
+var (
+ protoPoolRequestContext = sync.Pool{
+ New: func() any {
+ return &RequestContext{}
+ },
+ }
+
+ ProtoPoolRequestContext_IP = sync.Pool{
+ New: func() any {
+ return &RequestContext_IP{}
+ },
+ }
+
+ ProtoPoolRequestContext_TCP = sync.Pool{
+ New: func() any {
+ return &RequestContext_TCP{}
+ },
+ }
+
+ ProtoPoolRequestContext_UDP = sync.Pool{
+ New: func() any {
+ return &RequestContext_UDP{}
+ },
+ }
+
+ ProtoPoolRequestContext_Unix = sync.Pool{
+ New: func() any {
+ return &RequestContext_Unix{}
+ },
+ }
+)
+
+func NewRequestContext() *RequestContext {
+ if !UseProtoPooling.IsEnabled() {
+ return &RequestContext{}
+ }
+ return protoPoolRequestContext.Get().(*RequestContext)
+}
+
+func DeleteRequestContext(orig *RequestContext, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteSpanContext(orig.SpanContext, true)
+ for i := range orig.ClientMetadata {
+ DeleteKeyValue(&orig.ClientMetadata[i], false)
+ }
+ switch ov := orig.ClientAddress.(type) {
+ case *RequestContext_IP:
+ DeleteIPAddr(ov.IP, true)
+ ov.IP = nil
+ ProtoPoolRequestContext_IP.Put(ov)
+ case *RequestContext_TCP:
+ DeleteTCPAddr(ov.TCP, true)
+ ov.TCP = nil
+ ProtoPoolRequestContext_TCP.Put(ov)
+ case *RequestContext_UDP:
+ DeleteUDPAddr(ov.UDP, true)
+ ov.UDP = nil
+ ProtoPoolRequestContext_UDP.Put(ov)
+ case *RequestContext_Unix:
+ DeleteUnixAddr(ov.Unix, true)
+ ov.Unix = nil
+ ProtoPoolRequestContext_Unix.Put(ov)
+
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolRequestContext.Put(orig)
+ }
+}
+
+func CopyRequestContext(dest, src *RequestContext) *RequestContext {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewRequestContext()
+ }
+ dest.SpanContext = CopySpanContext(dest.SpanContext, src.SpanContext)
+
+ dest.ClientMetadata = CopyKeyValueSlice(dest.ClientMetadata, src.ClientMetadata)
+
+ switch t := src.ClientAddress.(type) {
+ case *RequestContext_IP:
+ var ov *RequestContext_IP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_IP{}
+ } else {
+ ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
+ }
+ ov.IP = NewIPAddr()
+ CopyIPAddr(ov.IP, t.IP)
+ dest.ClientAddress = ov
+
+ case *RequestContext_TCP:
+ var ov *RequestContext_TCP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_TCP{}
+ } else {
+ ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
+ }
+ ov.TCP = NewTCPAddr()
+ CopyTCPAddr(ov.TCP, t.TCP)
+ dest.ClientAddress = ov
+
+ case *RequestContext_UDP:
+ var ov *RequestContext_UDP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_UDP{}
+ } else {
+ ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
+ }
+ ov.UDP = NewUDPAddr()
+ CopyUDPAddr(ov.UDP, t.UDP)
+ dest.ClientAddress = ov
+
+ case *RequestContext_Unix:
+ var ov *RequestContext_Unix
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_Unix{}
+ } else {
+ ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
+ }
+ ov.Unix = NewUnixAddr()
+ CopyUnixAddr(ov.Unix, t.Unix)
+ dest.ClientAddress = ov
+
+ default:
+ dest.ClientAddress = nil
+ }
+
+ return dest
+}
+
+func CopyRequestContextSlice(dest, src []RequestContext) []RequestContext {
+ var newDest []RequestContext
+ if cap(dest) < len(src) {
+ newDest = make([]RequestContext, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteRequestContext(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyRequestContext(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyRequestContextPtrSlice(dest, src []*RequestContext) []*RequestContext {
+ var newDest []*RequestContext
+ if cap(dest) < len(src) {
+ newDest = make([]*RequestContext, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewRequestContext()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteRequestContext(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewRequestContext()
+ }
+ }
+ for i := range src {
+ CopyRequestContext(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *RequestContext) Reset() {
+ *orig = RequestContext{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *RequestContext) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.SpanContext != nil {
+ dest.WriteObjectField("spanContext")
+ orig.SpanContext.MarshalJSON(dest)
+ }
+ if len(orig.ClientMetadata) > 0 {
+ dest.WriteObjectField("clientMetadata")
+ dest.WriteArrayStart()
+ orig.ClientMetadata[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ClientMetadata); i++ {
+ dest.WriteMore()
+ orig.ClientMetadata[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ switch orig := orig.ClientAddress.(type) {
+ case *RequestContext_IP:
+ if orig.IP != nil {
+ dest.WriteObjectField("iP")
+ orig.IP.MarshalJSON(dest)
+ }
+ case *RequestContext_TCP:
+ if orig.TCP != nil {
+ dest.WriteObjectField("tCP")
+ orig.TCP.MarshalJSON(dest)
+ }
+ case *RequestContext_UDP:
+ if orig.UDP != nil {
+ dest.WriteObjectField("uDP")
+ orig.UDP.MarshalJSON(dest)
+ }
+ case *RequestContext_Unix:
+ if orig.Unix != nil {
+ dest.WriteObjectField("unix")
+ orig.Unix.MarshalJSON(dest)
+ }
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *RequestContext) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "spanContext", "span_context":
+ orig.SpanContext = NewSpanContext()
+ orig.SpanContext.UnmarshalJSON(iter)
+ case "clientMetadata", "client_metadata":
+ for iter.ReadArray() {
+ orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{})
+ orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalJSON(iter)
+ }
+
+ case "iP":
+ {
+ var ov *RequestContext_IP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_IP{}
+ } else {
+ ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
+ }
+ ov.IP = NewIPAddr()
+ ov.IP.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ case "tCP":
+ {
+ var ov *RequestContext_TCP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_TCP{}
+ } else {
+ ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
+ }
+ ov.TCP = NewTCPAddr()
+ ov.TCP.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ case "uDP":
+ {
+ var ov *RequestContext_UDP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_UDP{}
+ } else {
+ ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
+ }
+ ov.UDP = NewUDPAddr()
+ ov.UDP.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ case "unix":
+ {
+ var ov *RequestContext_Unix
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_Unix{}
+ } else {
+ ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
+ }
+ ov.Unix = NewUnixAddr()
+ ov.Unix.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *RequestContext) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.SpanContext != nil {
+ l = orig.SpanContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.ClientMetadata {
+ l = orig.ClientMetadata[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ switch orig := orig.ClientAddress.(type) {
+ case nil:
+ _ = orig
+ break
+ case *RequestContext_IP:
+ if orig.IP != nil {
+ l = orig.IP.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *RequestContext_TCP:
+ if orig.TCP != nil {
+ l = orig.TCP.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *RequestContext_UDP:
+ if orig.UDP != nil {
+ l = orig.UDP.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *RequestContext_Unix:
+ if orig.Unix != nil {
+ l = orig.Unix.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ }
+ return n
+}
+
+func (orig *RequestContext) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.SpanContext != nil {
+ l = orig.SpanContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ for i := len(orig.ClientMetadata) - 1; i >= 0; i-- {
+ l = orig.ClientMetadata[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ switch orig := orig.ClientAddress.(type) {
+ case *RequestContext_IP:
+ if orig.IP != nil {
+ l = orig.IP.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ case *RequestContext_TCP:
+ if orig.TCP != nil {
+ l = orig.TCP.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ case *RequestContext_UDP:
+ if orig.UDP != nil {
+ l = orig.UDP.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ case *RequestContext_Unix:
+ if orig.Unix != nil {
+ l = orig.Unix.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ }
+ return len(buf) - pos
+}
+
+func (orig *RequestContext) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.SpanContext = NewSpanContext()
+ err = orig.SpanContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientMetadata", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{})
+ err = orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_IP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_IP{}
+ } else {
+ ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
+ }
+ ov.IP = NewIPAddr()
+ err = ov.IP.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_TCP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_TCP{}
+ } else {
+ ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
+ }
+ ov.TCP = NewTCPAddr()
+ err = ov.TCP.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field UDP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_UDP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_UDP{}
+ } else {
+ ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
+ }
+ ov.UDP = NewUDPAddr()
+ err = ov.UDP.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Unix", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_Unix
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_Unix{}
+ } else {
+ ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
+ }
+ ov.Unix = NewUnixAddr()
+ err = ov.Unix.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestRequestContext() *RequestContext {
+ orig := NewRequestContext()
+ orig.SpanContext = GenTestSpanContext()
+ orig.ClientMetadata = []KeyValue{{}, *GenTestKeyValue()}
+ orig.ClientAddress = &RequestContext_IP{IP: GenTestIPAddr()}
+ return orig
+}
+
+func GenTestRequestContextPtrSlice() []*RequestContext {
+ orig := make([]*RequestContext, 5)
+ orig[0] = NewRequestContext()
+ orig[1] = GenTestRequestContext()
+ orig[2] = NewRequestContext()
+ orig[3] = GenTestRequestContext()
+ orig[4] = NewRequestContext()
+ return orig
+}
+
+func GenTestRequestContextSlice() []RequestContext {
+ orig := make([]RequestContext, 5)
+ orig[1] = *GenTestRequestContext()
+ orig[3] = *GenTestRequestContext()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go
new file mode 100644
index 000000000..7bef0ffcb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go
@@ -0,0 +1,325 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Resource is a message representing the resource information.
+type Resource struct {
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ EntityRefs []*EntityRef
+}
+
+var (
+ protoPoolResource = sync.Pool{
+ New: func() any {
+ return &Resource{}
+ },
+ }
+)
+
+func NewResource() *Resource {
+ if !UseProtoPooling.IsEnabled() {
+ return &Resource{}
+ }
+ return protoPoolResource.Get().(*Resource)
+}
+
+func DeleteResource(orig *Resource, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ for i := range orig.EntityRefs {
+ DeleteEntityRef(orig.EntityRefs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResource.Put(orig)
+ }
+}
+
+func CopyResource(dest, src *Resource) *Resource {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResource()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ dest.EntityRefs = CopyEntityRefPtrSlice(dest.EntityRefs, src.EntityRefs)
+
+ return dest
+}
+
+func CopyResourceSlice(dest, src []Resource) []Resource {
+ var newDest []Resource
+ if cap(dest) < len(src) {
+ newDest = make([]Resource, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResource(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResource(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourcePtrSlice(dest, src []*Resource) []*Resource {
+ var newDest []*Resource
+ if cap(dest) < len(src) {
+ newDest = make([]*Resource, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+	// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResource()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResource(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResource()
+ }
+ }
+ for i := range src {
+ CopyResource(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Resource) Reset() {
+ *orig = Resource{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Resource) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ if len(orig.EntityRefs) > 0 {
+ dest.WriteObjectField("entityRefs")
+ dest.WriteArrayStart()
+ orig.EntityRefs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.EntityRefs); i++ {
+ dest.WriteMore()
+ orig.EntityRefs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Resource) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ case "entityRefs", "entity_refs":
+ for iter.ReadArray() {
+ orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
+ orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Resource) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ for i := range orig.EntityRefs {
+ l = orig.EntityRefs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Resource) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x10
+ }
+ for i := len(orig.EntityRefs) - 1; i >= 0; i-- {
+ l = orig.EntityRefs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *Resource) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
+ err = orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResource() *Resource {
+ orig := NewResource()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.EntityRefs = []*EntityRef{{}, GenTestEntityRef()}
+ return orig
+}
+
+func GenTestResourcePtrSlice() []*Resource {
+ orig := make([]*Resource, 5)
+ orig[0] = NewResource()
+ orig[1] = GenTestResource()
+ orig[2] = NewResource()
+ orig[3] = GenTestResource()
+ orig[4] = NewResource()
+ return orig
+}
+
+func GenTestResourceSlice() []Resource {
+ orig := make([]Resource, 5)
+ orig[1] = *GenTestResource()
+ orig[3] = *GenTestResource()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go
new file mode 100644
index 000000000..57c32e3c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go
@@ -0,0 +1,365 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceLogs is a collection of logs from a Resource.
+type ResourceLogs struct {
+ Resource Resource
+ ScopeLogs []*ScopeLogs
+ SchemaUrl string
+ DeprecatedScopeLogs []*ScopeLogs
+}
+
+var (
+ protoPoolResourceLogs = sync.Pool{
+ New: func() any {
+ return &ResourceLogs{}
+ },
+ }
+)
+
+func NewResourceLogs() *ResourceLogs {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceLogs{}
+ }
+ return protoPoolResourceLogs.Get().(*ResourceLogs)
+}
+
+func DeleteResourceLogs(orig *ResourceLogs, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeLogs {
+ DeleteScopeLogs(orig.ScopeLogs[i], true)
+ }
+ for i := range orig.DeprecatedScopeLogs {
+ DeleteScopeLogs(orig.DeprecatedScopeLogs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceLogs.Put(orig)
+ }
+}
+
+func CopyResourceLogs(dest, src *ResourceLogs) *ResourceLogs {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceLogs()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeLogs = CopyScopeLogsPtrSlice(dest.ScopeLogs, src.ScopeLogs)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.DeprecatedScopeLogs = CopyScopeLogsPtrSlice(dest.DeprecatedScopeLogs, src.DeprecatedScopeLogs)
+
+ return dest
+}
+
+func CopyResourceLogsSlice(dest, src []ResourceLogs) []ResourceLogs {
+ var newDest []ResourceLogs
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceLogs, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceLogs(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceLogs(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceLogsPtrSlice(dest, src []*ResourceLogs) []*ResourceLogs {
+ var newDest []*ResourceLogs
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceLogs, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+	// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceLogs()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceLogs(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceLogs()
+ }
+ }
+ for i := range src {
+ CopyResourceLogs(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceLogs) Reset() {
+ *orig = ResourceLogs{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceLogs) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeLogs) > 0 {
+ dest.WriteObjectField("scopeLogs")
+ dest.WriteArrayStart()
+ orig.ScopeLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeLogs); i++ {
+ dest.WriteMore()
+ orig.ScopeLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if len(orig.DeprecatedScopeLogs) > 0 {
+ dest.WriteObjectField("deprecatedScopeLogs")
+ dest.WriteArrayStart()
+ orig.DeprecatedScopeLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DeprecatedScopeLogs); i++ {
+ dest.WriteMore()
+ orig.DeprecatedScopeLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ResourceLogs) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeLogs", "scope_logs":
+ for iter.ReadArray() {
+ orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
+ orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "deprecatedScopeLogs", "deprecated_scope_logs":
+ for iter.ReadArray() {
+ orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs())
+ orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceLogs) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeLogs {
+ l = orig.ScopeLogs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.DeprecatedScopeLogs {
+ l = orig.DeprecatedScopeLogs[i].SizeProto()
+ n += 2 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceLogs) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeLogs) - 1; i >= 0; i-- {
+ l = orig.ScopeLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DeprecatedScopeLogs) - 1; i >= 0; i-- {
+ l = orig.DeprecatedScopeLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3e
+ pos--
+ buf[pos] = 0xc2
+ }
+ return len(buf) - pos
+}
+
+func (orig *ResourceLogs) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
+ err = orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 1000:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs())
+ err = orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceLogs() *ResourceLogs {
+ orig := NewResourceLogs()
+ orig.Resource = *GenTestResource()
+ orig.ScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()}
+ orig.SchemaUrl = "test_schemaurl"
+ orig.DeprecatedScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()}
+ return orig
+}
+
+func GenTestResourceLogsPtrSlice() []*ResourceLogs {
+ orig := make([]*ResourceLogs, 5)
+ orig[0] = NewResourceLogs()
+ orig[1] = GenTestResourceLogs()
+ orig[2] = NewResourceLogs()
+ orig[3] = GenTestResourceLogs()
+ orig[4] = NewResourceLogs()
+ return orig
+}
+
+func GenTestResourceLogsSlice() []ResourceLogs {
+ orig := make([]ResourceLogs, 5)
+ orig[1] = *GenTestResourceLogs()
+ orig[3] = *GenTestResourceLogs()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go
new file mode 100644
index 000000000..a260fe62e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go
@@ -0,0 +1,365 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceMetrics is a collection of metrics from a Resource.
+type ResourceMetrics struct {
+ Resource Resource
+ ScopeMetrics []*ScopeMetrics
+ SchemaUrl string
+ DeprecatedScopeMetrics []*ScopeMetrics
+}
+
+var (
+ protoPoolResourceMetrics = sync.Pool{
+ New: func() any {
+ return &ResourceMetrics{}
+ },
+ }
+)
+
+func NewResourceMetrics() *ResourceMetrics {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceMetrics{}
+ }
+ return protoPoolResourceMetrics.Get().(*ResourceMetrics)
+}
+
+func DeleteResourceMetrics(orig *ResourceMetrics, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeMetrics {
+ DeleteScopeMetrics(orig.ScopeMetrics[i], true)
+ }
+ for i := range orig.DeprecatedScopeMetrics {
+ DeleteScopeMetrics(orig.DeprecatedScopeMetrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceMetrics.Put(orig)
+ }
+}
+
+func CopyResourceMetrics(dest, src *ResourceMetrics) *ResourceMetrics {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceMetrics()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeMetrics = CopyScopeMetricsPtrSlice(dest.ScopeMetrics, src.ScopeMetrics)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.DeprecatedScopeMetrics = CopyScopeMetricsPtrSlice(dest.DeprecatedScopeMetrics, src.DeprecatedScopeMetrics)
+
+ return dest
+}
+
+func CopyResourceMetricsSlice(dest, src []ResourceMetrics) []ResourceMetrics {
+ var newDest []ResourceMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceMetrics, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceMetrics(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceMetrics(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceMetricsPtrSlice(dest, src []*ResourceMetrics) []*ResourceMetrics {
+ var newDest []*ResourceMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceMetrics, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+	// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceMetrics()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceMetrics(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceMetrics()
+ }
+ }
+ for i := range src {
+ CopyResourceMetrics(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceMetrics) Reset() {
+ *orig = ResourceMetrics{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceMetrics) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeMetrics) > 0 {
+ dest.WriteObjectField("scopeMetrics")
+ dest.WriteArrayStart()
+ orig.ScopeMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeMetrics); i++ {
+ dest.WriteMore()
+ orig.ScopeMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if len(orig.DeprecatedScopeMetrics) > 0 {
+ dest.WriteObjectField("deprecatedScopeMetrics")
+ dest.WriteArrayStart()
+ orig.DeprecatedScopeMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DeprecatedScopeMetrics); i++ {
+ dest.WriteMore()
+ orig.DeprecatedScopeMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ResourceMetrics) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeMetrics", "scope_metrics":
+ for iter.ReadArray() {
+ orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
+ orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "deprecatedScopeMetrics", "deprecated_scope_metrics":
+ for iter.ReadArray() {
+ orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics())
+ orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceMetrics) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeMetrics {
+ l = orig.ScopeMetrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.DeprecatedScopeMetrics {
+ l = orig.DeprecatedScopeMetrics[i].SizeProto()
+ n += 2 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceMetrics) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeMetrics) - 1; i >= 0; i-- {
+ l = orig.ScopeMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DeprecatedScopeMetrics) - 1; i >= 0; i-- {
+ l = orig.DeprecatedScopeMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3e
+ pos--
+ buf[pos] = 0xc2
+ }
+ return len(buf) - pos
+}
+
+func (orig *ResourceMetrics) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
+ err = orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 1000:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics())
+ err = orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceMetrics() *ResourceMetrics {
+ orig := NewResourceMetrics()
+ orig.Resource = *GenTestResource()
+ orig.ScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()}
+ orig.SchemaUrl = "test_schemaurl"
+ orig.DeprecatedScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()}
+ return orig
+}
+
+func GenTestResourceMetricsPtrSlice() []*ResourceMetrics {
+ orig := make([]*ResourceMetrics, 5)
+ orig[0] = NewResourceMetrics()
+ orig[1] = GenTestResourceMetrics()
+ orig[2] = NewResourceMetrics()
+ orig[3] = GenTestResourceMetrics()
+ orig[4] = NewResourceMetrics()
+ return orig
+}
+
+func GenTestResourceMetricsSlice() []ResourceMetrics {
+ orig := make([]ResourceMetrics, 5)
+ orig[1] = *GenTestResourceMetrics()
+ orig[3] = *GenTestResourceMetrics()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go
new file mode 100644
index 000000000..69522c24d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceProfiles is a collection of profiles from a Resource.
+type ResourceProfiles struct {
+ Resource Resource
+ ScopeProfiles []*ScopeProfiles
+ SchemaUrl string
+}
+
+var (
+ protoPoolResourceProfiles = sync.Pool{
+ New: func() any {
+ return &ResourceProfiles{}
+ },
+ }
+)
+
+func NewResourceProfiles() *ResourceProfiles {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceProfiles{}
+ }
+ return protoPoolResourceProfiles.Get().(*ResourceProfiles)
+}
+
+func DeleteResourceProfiles(orig *ResourceProfiles, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeProfiles {
+ DeleteScopeProfiles(orig.ScopeProfiles[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceProfiles.Put(orig)
+ }
+}
+
+func CopyResourceProfiles(dest, src *ResourceProfiles) *ResourceProfiles {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceProfiles()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeProfiles = CopyScopeProfilesPtrSlice(dest.ScopeProfiles, src.ScopeProfiles)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyResourceProfilesSlice(dest, src []ResourceProfiles) []ResourceProfiles {
+ var newDest []ResourceProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceProfiles, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceProfiles(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceProfiles(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceProfilesPtrSlice(dest, src []*ResourceProfiles) []*ResourceProfiles {
+ var newDest []*ResourceProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceProfiles, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceProfiles()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceProfiles(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceProfiles()
+ }
+ }
+ for i := range src {
+ CopyResourceProfiles(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceProfiles) Reset() {
+ *orig = ResourceProfiles{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceProfiles) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeProfiles) > 0 {
+ dest.WriteObjectField("scopeProfiles")
+ dest.WriteArrayStart()
+ orig.ScopeProfiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeProfiles); i++ {
+ dest.WriteMore()
+ orig.ScopeProfiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ResourceProfiles) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeProfiles", "scope_profiles":
+ for iter.ReadArray() {
+ orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
+ orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceProfiles) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeProfiles {
+ l = orig.ScopeProfiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceProfiles) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeProfiles) - 1; i >= 0; i-- {
+ l = orig.ScopeProfiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ResourceProfiles) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
+ err = orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceProfiles() *ResourceProfiles {
+ orig := NewResourceProfiles()
+ orig.Resource = *GenTestResource()
+ orig.ScopeProfiles = []*ScopeProfiles{{}, GenTestScopeProfiles()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestResourceProfilesPtrSlice() []*ResourceProfiles {
+ orig := make([]*ResourceProfiles, 5)
+ orig[0] = NewResourceProfiles()
+ orig[1] = GenTestResourceProfiles()
+ orig[2] = NewResourceProfiles()
+ orig[3] = GenTestResourceProfiles()
+ orig[4] = NewResourceProfiles()
+ return orig
+}
+
+func GenTestResourceProfilesSlice() []ResourceProfiles {
+ orig := make([]ResourceProfiles, 5)
+ orig[1] = *GenTestResourceProfiles()
+ orig[3] = *GenTestResourceProfiles()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go
new file mode 100644
index 000000000..bd70dedb8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go
@@ -0,0 +1,365 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceSpans is a collection of spans from a Resource.
+type ResourceSpans struct {
+ Resource Resource
+ ScopeSpans []*ScopeSpans
+ SchemaUrl string
+ DeprecatedScopeSpans []*ScopeSpans
+}
+
+var (
+ protoPoolResourceSpans = sync.Pool{
+ New: func() any {
+ return &ResourceSpans{}
+ },
+ }
+)
+
+func NewResourceSpans() *ResourceSpans {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceSpans{}
+ }
+ return protoPoolResourceSpans.Get().(*ResourceSpans)
+}
+
+func DeleteResourceSpans(orig *ResourceSpans, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeSpans {
+ DeleteScopeSpans(orig.ScopeSpans[i], true)
+ }
+ for i := range orig.DeprecatedScopeSpans {
+ DeleteScopeSpans(orig.DeprecatedScopeSpans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceSpans.Put(orig)
+ }
+}
+
+func CopyResourceSpans(dest, src *ResourceSpans) *ResourceSpans {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceSpans()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeSpans = CopyScopeSpansPtrSlice(dest.ScopeSpans, src.ScopeSpans)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.DeprecatedScopeSpans = CopyScopeSpansPtrSlice(dest.DeprecatedScopeSpans, src.DeprecatedScopeSpans)
+
+ return dest
+}
+
+func CopyResourceSpansSlice(dest, src []ResourceSpans) []ResourceSpans {
+ var newDest []ResourceSpans
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceSpans, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceSpans(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceSpans(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceSpansPtrSlice(dest, src []*ResourceSpans) []*ResourceSpans {
+ var newDest []*ResourceSpans
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceSpans, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceSpans()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceSpans(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceSpans()
+ }
+ }
+ for i := range src {
+ CopyResourceSpans(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceSpans) Reset() {
+ *orig = ResourceSpans{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceSpans) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeSpans) > 0 {
+ dest.WriteObjectField("scopeSpans")
+ dest.WriteArrayStart()
+ orig.ScopeSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeSpans); i++ {
+ dest.WriteMore()
+ orig.ScopeSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if len(orig.DeprecatedScopeSpans) > 0 {
+ dest.WriteObjectField("deprecatedScopeSpans")
+ dest.WriteArrayStart()
+ orig.DeprecatedScopeSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DeprecatedScopeSpans); i++ {
+ dest.WriteMore()
+ orig.DeprecatedScopeSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ResourceSpans) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeSpans", "scope_spans":
+ for iter.ReadArray() {
+ orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
+ orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "deprecatedScopeSpans", "deprecated_scope_spans":
+ for iter.ReadArray() {
+ orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans())
+ orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceSpans) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeSpans {
+ l = orig.ScopeSpans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.DeprecatedScopeSpans {
+ l = orig.DeprecatedScopeSpans[i].SizeProto()
+ n += 2 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceSpans) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeSpans) - 1; i >= 0; i-- {
+ l = orig.ScopeSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DeprecatedScopeSpans) - 1; i >= 0; i-- {
+ l = orig.DeprecatedScopeSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3e
+ pos--
+ buf[pos] = 0xc2
+ }
+ return len(buf) - pos
+}
+
+func (orig *ResourceSpans) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
+ err = orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 1000:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans())
+ err = orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceSpans() *ResourceSpans {
+ orig := NewResourceSpans()
+ orig.Resource = *GenTestResource()
+ orig.ScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()}
+ orig.SchemaUrl = "test_schemaurl"
+ orig.DeprecatedScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()}
+ return orig
+}
+
+func GenTestResourceSpansPtrSlice() []*ResourceSpans {
+ orig := make([]*ResourceSpans, 5)
+ orig[0] = NewResourceSpans()
+ orig[1] = GenTestResourceSpans()
+ orig[2] = NewResourceSpans()
+ orig[3] = GenTestResourceSpans()
+ orig[4] = NewResourceSpans()
+ return orig
+}
+
+func GenTestResourceSpansSlice() []ResourceSpans {
+ orig := make([]ResourceSpans, 5)
+ orig[1] = *GenTestResourceSpans()
+ orig[3] = *GenTestResourceSpans()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go
new file mode 100644
index 000000000..df89e9b63
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go
@@ -0,0 +1,451 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Sample represents each record value encountered within a profiled program.
+type Sample struct {
+ StackIndex int32
+ Values []int64
+ AttributeIndices []int32
+ LinkIndex int32
+ TimestampsUnixNano []uint64
+}
+
+var (
+ protoPoolSample = sync.Pool{
+ New: func() any {
+ return &Sample{}
+ },
+ }
+)
+
+func NewSample() *Sample {
+ if !UseProtoPooling.IsEnabled() {
+ return &Sample{}
+ }
+ return protoPoolSample.Get().(*Sample)
+}
+
+func DeleteSample(orig *Sample, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSample.Put(orig)
+ }
+}
+
+func CopySample(dest, src *Sample) *Sample {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSample()
+ }
+ dest.StackIndex = src.StackIndex
+
+ dest.Values = append(dest.Values[:0], src.Values...)
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+ dest.LinkIndex = src.LinkIndex
+
+ dest.TimestampsUnixNano = append(dest.TimestampsUnixNano[:0], src.TimestampsUnixNano...)
+
+ return dest
+}
+
+func CopySampleSlice(dest, src []Sample) []Sample {
+ var newDest []Sample
+ if cap(dest) < len(src) {
+ newDest = make([]Sample, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSample(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySample(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySamplePtrSlice(dest, src []*Sample) []*Sample {
+ var newDest []*Sample
+ if cap(dest) < len(src) {
+ newDest = make([]*Sample, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSample()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSample(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSample()
+ }
+ }
+ for i := range src {
+ CopySample(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Sample) Reset() {
+ *orig = Sample{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Sample) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.StackIndex != int32(0) {
+ dest.WriteObjectField("stackIndex")
+ dest.WriteInt32(orig.StackIndex)
+ }
+ if len(orig.Values) > 0 {
+ dest.WriteObjectField("values")
+ dest.WriteArrayStart()
+ dest.WriteInt64(orig.Values[0])
+ for i := 1; i < len(orig.Values); i++ {
+ dest.WriteMore()
+ dest.WriteInt64(orig.Values[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.AttributeIndices) > 0 {
+ dest.WriteObjectField("attributeIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.AttributeIndices[0])
+ for i := 1; i < len(orig.AttributeIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.AttributeIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.LinkIndex != int32(0) {
+ dest.WriteObjectField("linkIndex")
+ dest.WriteInt32(orig.LinkIndex)
+ }
+ if len(orig.TimestampsUnixNano) > 0 {
+ dest.WriteObjectField("timestampsUnixNano")
+ dest.WriteArrayStart()
+ dest.WriteUint64(orig.TimestampsUnixNano[0])
+ for i := 1; i < len(orig.TimestampsUnixNano); i++ {
+ dest.WriteMore()
+ dest.WriteUint64(orig.TimestampsUnixNano[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Sample) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "stackIndex", "stack_index":
+ orig.StackIndex = iter.ReadInt32()
+ case "values":
+ for iter.ReadArray() {
+ orig.Values = append(orig.Values, iter.ReadInt64())
+ }
+
+ case "attributeIndices", "attribute_indices":
+ for iter.ReadArray() {
+ orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
+ }
+
+ case "linkIndex", "link_index":
+ orig.LinkIndex = iter.ReadInt32()
+ case "timestampsUnixNano", "timestamps_unix_nano":
+ for iter.ReadArray() {
+ orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, iter.ReadUint64())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Sample) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.StackIndex != 0 {
+ n += 1 + proto.Sov(uint64(orig.StackIndex))
+ }
+ if len(orig.Values) > 0 {
+ l = 0
+ for _, e := range orig.Values {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if len(orig.AttributeIndices) > 0 {
+ l = 0
+ for _, e := range orig.AttributeIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.LinkIndex != 0 {
+ n += 1 + proto.Sov(uint64(orig.LinkIndex))
+ }
+ l = len(orig.TimestampsUnixNano)
+ if l > 0 {
+ l *= 8
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Sample) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.StackIndex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.StackIndex))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.Values)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Values[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.AttributeIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x1a
+ }
+ if orig.LinkIndex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.LinkIndex))
+ pos--
+ buf[pos] = 0x20
+ }
+ l = len(orig.TimestampsUnixNano)
+ if l > 0 {
+ for i := l - 1; i >= 0; i-- {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimestampsUnixNano[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(l*8))
+ pos--
+ buf[pos] = 0x2a
+ }
+ return len(buf) - pos
+}
+
+func (orig *Sample) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field StackIndex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StackIndex = int32(num)
+ case 2:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.Values = append(orig.Values, int64(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field Values", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.Values = append(orig.Values, int64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ case 3:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
+ }
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.LinkIndex = int32(num)
+ case 5:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ size := length / 8
+ orig.TimestampsUnixNano = make([]uint64, size)
+ var num uint64
+ for i := 0; i < size; i++ {
+ num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.TimestampsUnixNano[i] = uint64(num)
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field TimestampsUnixNano", pos-startPos)
+ }
+ case proto.WireTypeI64:
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, uint64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSample() *Sample {
+ orig := NewSample()
+ orig.StackIndex = int32(13)
+ orig.Values = []int64{int64(0), int64(13)}
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ orig.LinkIndex = int32(13)
+ orig.TimestampsUnixNano = []uint64{uint64(0), uint64(13)}
+ return orig
+}
+
+func GenTestSamplePtrSlice() []*Sample {
+ orig := make([]*Sample, 5)
+ orig[0] = NewSample()
+ orig[1] = GenTestSample()
+ orig[2] = NewSample()
+ orig[3] = GenTestSample()
+ orig[4] = NewSample()
+ return orig
+}
+
+func GenTestSampleSlice() []Sample {
+ orig := make([]Sample, 5)
+ orig[1] = *GenTestSample()
+ orig[3] = *GenTestSample()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go
new file mode 100644
index 000000000..8f303264c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeLogs is a collection of logs from a LibraryInstrumentation.
+type ScopeLogs struct {
+ Scope InstrumentationScope
+ LogRecords []*LogRecord
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeLogs = sync.Pool{
+ New: func() any {
+ return &ScopeLogs{}
+ },
+ }
+)
+
+func NewScopeLogs() *ScopeLogs {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeLogs{}
+ }
+ return protoPoolScopeLogs.Get().(*ScopeLogs)
+}
+
+func DeleteScopeLogs(orig *ScopeLogs, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.LogRecords {
+ DeleteLogRecord(orig.LogRecords[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeLogs.Put(orig)
+ }
+}
+
+func CopyScopeLogs(dest, src *ScopeLogs) *ScopeLogs {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeLogs()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.LogRecords = CopyLogRecordPtrSlice(dest.LogRecords, src.LogRecords)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeLogsSlice(dest, src []ScopeLogs) []ScopeLogs {
+ var newDest []ScopeLogs
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeLogs, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeLogs(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeLogs(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeLogsPtrSlice(dest, src []*ScopeLogs) []*ScopeLogs {
+ var newDest []*ScopeLogs
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeLogs, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeLogs()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeLogs(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeLogs()
+ }
+ }
+ for i := range src {
+ CopyScopeLogs(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeLogs) Reset() {
+ *orig = ScopeLogs{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeLogs) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.LogRecords) > 0 {
+ dest.WriteObjectField("logRecords")
+ dest.WriteArrayStart()
+ orig.LogRecords[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.LogRecords); i++ {
+ dest.WriteMore()
+ orig.LogRecords[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeLogs) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "logRecords", "log_records":
+ for iter.ReadArray() {
+ orig.LogRecords = append(orig.LogRecords, NewLogRecord())
+ orig.LogRecords[len(orig.LogRecords)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeLogs) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.LogRecords {
+ l = orig.LogRecords[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeLogs) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.LogRecords) - 1; i >= 0; i-- {
+ l = orig.LogRecords[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeLogs) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.LogRecords = append(orig.LogRecords, NewLogRecord())
+ err = orig.LogRecords[len(orig.LogRecords)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeLogs() *ScopeLogs {
+ orig := NewScopeLogs()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.LogRecords = []*LogRecord{{}, GenTestLogRecord()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeLogsPtrSlice() []*ScopeLogs {
+ orig := make([]*ScopeLogs, 5)
+ orig[0] = NewScopeLogs()
+ orig[1] = GenTestScopeLogs()
+ orig[2] = NewScopeLogs()
+ orig[3] = GenTestScopeLogs()
+ orig[4] = NewScopeLogs()
+ return orig
+}
+
+func GenTestScopeLogsSlice() []ScopeLogs {
+ orig := make([]ScopeLogs, 5)
+ orig[1] = *GenTestScopeLogs()
+ orig[3] = *GenTestScopeLogs()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go
new file mode 100644
index 000000000..fc521a401
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeMetrics is a collection of metrics from a LibraryInstrumentation.
+type ScopeMetrics struct {
+ Scope InstrumentationScope
+ Metrics []*Metric
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeMetrics = sync.Pool{
+ New: func() any {
+ return &ScopeMetrics{}
+ },
+ }
+)
+
+func NewScopeMetrics() *ScopeMetrics {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeMetrics{}
+ }
+ return protoPoolScopeMetrics.Get().(*ScopeMetrics)
+}
+
+func DeleteScopeMetrics(orig *ScopeMetrics, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.Metrics {
+ DeleteMetric(orig.Metrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeMetrics.Put(orig)
+ }
+}
+
+func CopyScopeMetrics(dest, src *ScopeMetrics) *ScopeMetrics {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeMetrics()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.Metrics = CopyMetricPtrSlice(dest.Metrics, src.Metrics)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeMetricsSlice(dest, src []ScopeMetrics) []ScopeMetrics {
+ var newDest []ScopeMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeMetrics, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeMetrics(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeMetrics(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeMetricsPtrSlice(dest, src []*ScopeMetrics) []*ScopeMetrics {
+ var newDest []*ScopeMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeMetrics, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeMetrics()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeMetrics(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeMetrics()
+ }
+ }
+ for i := range src {
+ CopyScopeMetrics(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeMetrics) Reset() {
+ *orig = ScopeMetrics{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeMetrics) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.Metrics) > 0 {
+ dest.WriteObjectField("metrics")
+ dest.WriteArrayStart()
+ orig.Metrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Metrics); i++ {
+ dest.WriteMore()
+ orig.Metrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeMetrics) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "metrics":
+ for iter.ReadArray() {
+ orig.Metrics = append(orig.Metrics, NewMetric())
+ orig.Metrics[len(orig.Metrics)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeMetrics) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Metrics {
+ l = orig.Metrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeMetrics) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Metrics) - 1; i >= 0; i-- {
+ l = orig.Metrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeMetrics) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Metrics = append(orig.Metrics, NewMetric())
+ err = orig.Metrics[len(orig.Metrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeMetrics() *ScopeMetrics {
+ orig := NewScopeMetrics()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.Metrics = []*Metric{{}, GenTestMetric()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeMetricsPtrSlice() []*ScopeMetrics {
+ orig := make([]*ScopeMetrics, 5)
+ orig[0] = NewScopeMetrics()
+ orig[1] = GenTestScopeMetrics()
+ orig[2] = NewScopeMetrics()
+ orig[3] = GenTestScopeMetrics()
+ orig[4] = NewScopeMetrics()
+ return orig
+}
+
+func GenTestScopeMetricsSlice() []ScopeMetrics {
+ orig := make([]ScopeMetrics, 5)
+ orig[1] = *GenTestScopeMetrics()
+ orig[3] = *GenTestScopeMetrics()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go
new file mode 100644
index 000000000..c17284b1b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeProfiles is a collection of profiles from a LibraryInstrumentation.
+type ScopeProfiles struct {
+ Scope InstrumentationScope
+ Profiles []*Profile
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeProfiles = sync.Pool{
+ New: func() any {
+ return &ScopeProfiles{}
+ },
+ }
+)
+
+func NewScopeProfiles() *ScopeProfiles {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeProfiles{}
+ }
+ return protoPoolScopeProfiles.Get().(*ScopeProfiles)
+}
+
+func DeleteScopeProfiles(orig *ScopeProfiles, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.Profiles {
+ DeleteProfile(orig.Profiles[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeProfiles.Put(orig)
+ }
+}
+
+func CopyScopeProfiles(dest, src *ScopeProfiles) *ScopeProfiles {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeProfiles()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.Profiles = CopyProfilePtrSlice(dest.Profiles, src.Profiles)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeProfilesSlice(dest, src []ScopeProfiles) []ScopeProfiles {
+ var newDest []ScopeProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeProfiles, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeProfiles(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeProfiles(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeProfilesPtrSlice(dest, src []*ScopeProfiles) []*ScopeProfiles {
+ var newDest []*ScopeProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeProfiles, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeProfiles()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeProfiles(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeProfiles()
+ }
+ }
+ for i := range src {
+ CopyScopeProfiles(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeProfiles) Reset() {
+ *orig = ScopeProfiles{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeProfiles) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.Profiles) > 0 {
+ dest.WriteObjectField("profiles")
+ dest.WriteArrayStart()
+ orig.Profiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Profiles); i++ {
+ dest.WriteMore()
+ orig.Profiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeProfiles) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "profiles":
+ for iter.ReadArray() {
+ orig.Profiles = append(orig.Profiles, NewProfile())
+ orig.Profiles[len(orig.Profiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeProfiles) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Profiles {
+ l = orig.Profiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeProfiles) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Profiles) - 1; i >= 0; i-- {
+ l = orig.Profiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeProfiles) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Profiles = append(orig.Profiles, NewProfile())
+ err = orig.Profiles[len(orig.Profiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeProfiles() *ScopeProfiles {
+ orig := NewScopeProfiles()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.Profiles = []*Profile{{}, GenTestProfile()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeProfilesPtrSlice() []*ScopeProfiles {
+ orig := make([]*ScopeProfiles, 5)
+ orig[0] = NewScopeProfiles()
+ orig[1] = GenTestScopeProfiles()
+ orig[2] = NewScopeProfiles()
+ orig[3] = GenTestScopeProfiles()
+ orig[4] = NewScopeProfiles()
+ return orig
+}
+
+func GenTestScopeProfilesSlice() []ScopeProfiles {
+ orig := make([]ScopeProfiles, 5)
+ orig[1] = *GenTestScopeProfiles()
+ orig[3] = *GenTestScopeProfiles()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go
new file mode 100644
index 000000000..a02a1c715
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeSpans is a collection of spans from a LibraryInstrumentation.
+type ScopeSpans struct {
+ Scope InstrumentationScope
+ Spans []*Span
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeSpans = sync.Pool{
+ New: func() any {
+ return &ScopeSpans{}
+ },
+ }
+)
+
+func NewScopeSpans() *ScopeSpans {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeSpans{}
+ }
+ return protoPoolScopeSpans.Get().(*ScopeSpans)
+}
+
+func DeleteScopeSpans(orig *ScopeSpans, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.Spans {
+ DeleteSpan(orig.Spans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeSpans.Put(orig)
+ }
+}
+
+func CopyScopeSpans(dest, src *ScopeSpans) *ScopeSpans {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeSpans()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.Spans = CopySpanPtrSlice(dest.Spans, src.Spans)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeSpansSlice(dest, src []ScopeSpans) []ScopeSpans {
+ var newDest []ScopeSpans
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeSpans, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeSpans(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeSpans(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeSpansPtrSlice(dest, src []*ScopeSpans) []*ScopeSpans {
+ var newDest []*ScopeSpans
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeSpans, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeSpans()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeSpans(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeSpans()
+ }
+ }
+ for i := range src {
+ CopyScopeSpans(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeSpans) Reset() {
+ *orig = ScopeSpans{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeSpans) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.Spans) > 0 {
+ dest.WriteObjectField("spans")
+ dest.WriteArrayStart()
+ orig.Spans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Spans); i++ {
+ dest.WriteMore()
+ orig.Spans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeSpans) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "spans":
+ for iter.ReadArray() {
+ orig.Spans = append(orig.Spans, NewSpan())
+ orig.Spans[len(orig.Spans)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeSpans) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Spans {
+ l = orig.Spans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeSpans) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Spans) - 1; i >= 0; i-- {
+ l = orig.Spans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeSpans) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Spans = append(orig.Spans, NewSpan())
+ err = orig.Spans[len(orig.Spans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeSpans() *ScopeSpans {
+ orig := NewScopeSpans()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.Spans = []*Span{{}, GenTestSpan()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeSpansPtrSlice() []*ScopeSpans {
+ orig := make([]*ScopeSpans, 5)
+ orig[0] = NewScopeSpans()
+ orig[1] = GenTestScopeSpans()
+ orig[2] = NewScopeSpans()
+ orig[3] = GenTestScopeSpans()
+ orig[4] = NewScopeSpans()
+ return orig
+}
+
+func GenTestScopeSpansSlice() []ScopeSpans {
+ orig := make([]ScopeSpans, 5)
+ orig[1] = *GenTestScopeSpans()
+ orig[3] = *GenTestScopeSpans()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go
similarity index 65%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go
index d53678b0b..7d0ef5a60 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go
@@ -11,29 +11,47 @@ import (
"fmt"
"sync"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// Span represents a single operation within a trace.
+// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
+type Span struct {
+ TraceId TraceID
+ SpanId SpanID
+ TraceState string
+ ParentSpanId SpanID
+ Flags uint32
+ Name string
+ Kind SpanKind
+ StartTimeUnixNano uint64
+ EndTimeUnixNano uint64
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ Events []*SpanEvent
+ DroppedEventsCount uint32
+ Links []*SpanLink
+ DroppedLinksCount uint32
+ Status Status
+}
+
var (
protoPoolSpan = sync.Pool{
New: func() any {
- return &otlptrace.Span{}
+ return &Span{}
},
}
)
-func NewOrigSpan() *otlptrace.Span {
+func NewSpan() *Span {
if !UseProtoPooling.IsEnabled() {
- return &otlptrace.Span{}
+ return &Span{}
}
- return protoPoolSpan.Get().(*otlptrace.Span)
+ return protoPoolSpan.Get().(*Span)
}
-func DeleteOrigSpan(orig *otlptrace.Span, nullable bool) {
+func DeleteSpan(orig *Span, nullable bool) {
if orig == nil {
return
}
@@ -43,19 +61,19 @@ func DeleteOrigSpan(orig *otlptrace.Span, nullable bool) {
return
}
- DeleteOrigTraceID(&orig.TraceId, false)
- DeleteOrigSpanID(&orig.SpanId, false)
- DeleteOrigSpanID(&orig.ParentSpanId, false)
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
+ DeleteSpanID(&orig.ParentSpanId, false)
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.Events {
- DeleteOrigSpan_Event(orig.Events[i], true)
+ DeleteSpanEvent(orig.Events[i], true)
}
for i := range orig.Links {
- DeleteOrigSpan_Link(orig.Links[i], true)
+ DeleteSpanLink(orig.Links[i], true)
}
- DeleteOrigStatus(&orig.Status, false)
+ DeleteStatus(&orig.Status, false)
orig.Reset()
if nullable {
@@ -63,68 +81,124 @@ func DeleteOrigSpan(orig *otlptrace.Span, nullable bool) {
}
}
-func CopyOrigSpan(dest, src *otlptrace.Span) {
+func CopySpan(dest, src *Span) *Span {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpan()
}
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
- CopyOrigTraceState(&dest.TraceState, &src.TraceState)
- dest.ParentSpanId = src.ParentSpanId
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ dest.TraceState = src.TraceState
+
+ CopySpanID(&dest.ParentSpanId, &src.ParentSpanId)
+
dest.Flags = src.Flags
+
dest.Name = src.Name
+
dest.Kind = src.Kind
+
dest.StartTimeUnixNano = src.StartTimeUnixNano
+
dest.EndTimeUnixNano = src.EndTimeUnixNano
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.DroppedAttributesCount = src.DroppedAttributesCount
- dest.Events = CopyOrigSpan_EventSlice(dest.Events, src.Events)
+
+ dest.Events = CopySpanEventPtrSlice(dest.Events, src.Events)
+
dest.DroppedEventsCount = src.DroppedEventsCount
- dest.Links = CopyOrigSpan_LinkSlice(dest.Links, src.Links)
+
+ dest.Links = CopySpanLinkPtrSlice(dest.Links, src.Links)
+
dest.DroppedLinksCount = src.DroppedLinksCount
- CopyOrigStatus(&dest.Status, &src.Status)
+
+ CopyStatus(&dest.Status, &src.Status)
+
+ return dest
}
-func GenTestOrigSpan() *otlptrace.Span {
- orig := NewOrigSpan()
- orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
- orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
- orig.TraceState = *GenTestOrigTraceState()
- orig.ParentSpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
- orig.Flags = uint32(13)
- orig.Name = "test_name"
- orig.Kind = otlptrace.Span_SpanKind(3)
- orig.StartTimeUnixNano = 1234567890
- orig.EndTimeUnixNano = 1234567890
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.DroppedAttributesCount = uint32(13)
- orig.Events = GenerateOrigTestSpan_EventSlice()
- orig.DroppedEventsCount = uint32(13)
- orig.Links = GenerateOrigTestSpan_LinkSlice()
- orig.DroppedLinksCount = uint32(13)
- orig.Status = *GenTestOrigStatus()
- return orig
+func CopySpanSlice(dest, src []Span) []Span {
+ var newDest []Span
+ if cap(dest) < len(src) {
+ newDest = make([]Span, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpan(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpan(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySpanPtrSlice(dest, src []*Span) []*Span {
+ var newDest []*Span
+ if cap(dest) < len(src) {
+ newDest = make([]*Span, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpan()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpan(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpan()
+ }
+ }
+ for i := range src {
+ CopySpan(newDest[i], src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) {
+func (orig *Span) Reset() {
+ *orig = Span{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Span) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
- if orig.TraceId != data.TraceID([16]byte{}) {
+ if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
- MarshalJSONOrigTraceID(&orig.TraceId, dest)
+ orig.TraceId.MarshalJSON(dest)
}
- if orig.SpanId != data.SpanID([8]byte{}) {
+ if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
- MarshalJSONOrigSpanID(&orig.SpanId, dest)
+ orig.SpanId.MarshalJSON(dest)
}
if orig.TraceState != "" {
dest.WriteObjectField("traceState")
dest.WriteString(orig.TraceState)
}
- if orig.ParentSpanId != data.SpanID([8]byte{}) {
+ if !orig.ParentSpanId.IsEmpty() {
dest.WriteObjectField("parentSpanId")
- MarshalJSONOrigSpanID(&orig.ParentSpanId, dest)
+ orig.ParentSpanId.MarshalJSON(dest)
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
@@ -150,10 +224,10 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) {
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -164,10 +238,10 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) {
if len(orig.Events) > 0 {
dest.WriteObjectField("events")
dest.WriteArrayStart()
- MarshalJSONOrigSpan_Event(orig.Events[0], dest)
+ orig.Events[0].MarshalJSON(dest)
for i := 1; i < len(orig.Events); i++ {
dest.WriteMore()
- MarshalJSONOrigSpan_Event(orig.Events[i], dest)
+ orig.Events[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -178,10 +252,10 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) {
if len(orig.Links) > 0 {
dest.WriteObjectField("links")
dest.WriteArrayStart()
- MarshalJSONOrigSpan_Link(orig.Links[0], dest)
+ orig.Links[0].MarshalJSON(dest)
for i := 1; i < len(orig.Links); i++ {
dest.WriteMore()
- MarshalJSONOrigSpan_Link(orig.Links[i], dest)
+ orig.Links[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -190,77 +264,81 @@ func MarshalJSONOrigSpan(orig *otlptrace.Span, dest *json.Stream) {
dest.WriteUint32(orig.DroppedLinksCount)
}
dest.WriteObjectField("status")
- MarshalJSONOrigStatus(&orig.Status, dest)
+ orig.Status.MarshalJSON(dest)
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigSpan unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSpan(orig *otlptrace.Span, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Span) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
- UnmarshalJSONOrigTraceID(&orig.TraceId, iter)
+
+ orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
- UnmarshalJSONOrigSpanID(&orig.SpanId, iter)
+
+ orig.SpanId.UnmarshalJSON(iter)
case "traceState", "trace_state":
orig.TraceState = iter.ReadString()
case "parentSpanId", "parent_span_id":
- UnmarshalJSONOrigSpanID(&orig.ParentSpanId, iter)
+
+ orig.ParentSpanId.UnmarshalJSON(iter)
case "flags":
orig.Flags = iter.ReadUint32()
case "name":
orig.Name = iter.ReadString()
case "kind":
- orig.Kind = otlptrace.Span_SpanKind(iter.ReadEnumValue(otlptrace.Span_SpanKind_value))
+ orig.Kind = SpanKind(iter.ReadEnumValue(SpanKind_value))
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "endTimeUnixNano", "end_time_unix_nano":
orig.EndTimeUnixNano = iter.ReadUint64()
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "events":
for iter.ReadArray() {
- orig.Events = append(orig.Events, NewOrigSpan_Event())
- UnmarshalJSONOrigSpan_Event(orig.Events[len(orig.Events)-1], iter)
+ orig.Events = append(orig.Events, NewSpanEvent())
+ orig.Events[len(orig.Events)-1].UnmarshalJSON(iter)
}
case "droppedEventsCount", "dropped_events_count":
orig.DroppedEventsCount = iter.ReadUint32()
case "links":
for iter.ReadArray() {
- orig.Links = append(orig.Links, NewOrigSpan_Link())
- UnmarshalJSONOrigSpan_Link(orig.Links[len(orig.Links)-1], iter)
+ orig.Links = append(orig.Links, NewSpanLink())
+ orig.Links[len(orig.Links)-1].UnmarshalJSON(iter)
}
case "droppedLinksCount", "dropped_links_count":
orig.DroppedLinksCount = iter.ReadUint32()
case "status":
- UnmarshalJSONOrigStatus(&orig.Status, iter)
+
+ orig.Status.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
-func SizeProtoOrigSpan(orig *otlptrace.Span) int {
+func (orig *Span) SizeProto() int {
var n int
var l int
_ = l
- l = SizeProtoOrigTraceID(&orig.TraceId)
+ l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
- l = SizeProtoOrigSpanID(&orig.SpanId)
+ l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.TraceState)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
- l = SizeProtoOrigSpanID(&orig.ParentSpanId)
+ l = orig.ParentSpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.Flags != 0 {
n += 6
@@ -279,43 +357,42 @@ func SizeProtoOrigSpan(orig *otlptrace.Span) int {
n += 9
}
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
for i := range orig.Events {
- l = SizeProtoOrigSpan_Event(orig.Events[i])
+ l = orig.Events[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedEventsCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedEventsCount))
}
for i := range orig.Links {
- l = SizeProtoOrigSpan_Link(orig.Links[i])
+ l = orig.Links[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedLinksCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedLinksCount))
}
- l = SizeProtoOrigStatus(&orig.Status)
+ l = orig.Status.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
-func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int {
+func (orig *Span) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
-
- l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos])
+ l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
- l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos])
+ l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -329,8 +406,7 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int {
pos--
buf[pos] = 0x1a
}
-
- l = MarshalProtoOrigSpanID(&orig.ParentSpanId, buf[:pos])
+ l = orig.ParentSpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -370,7 +446,7 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int {
buf[pos] = 0x41
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -382,7 +458,7 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int {
buf[pos] = 0x50
}
for i := len(orig.Events) - 1; i >= 0; i-- {
- l = MarshalProtoOrigSpan_Event(orig.Events[i], buf[:pos])
+ l = orig.Events[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -394,7 +470,7 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int {
buf[pos] = 0x60
}
for i := len(orig.Links) - 1; i >= 0; i-- {
- l = MarshalProtoOrigSpan_Link(orig.Links[i], buf[:pos])
+ l = orig.Links[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -405,8 +481,7 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int {
pos--
buf[pos] = 0x70
}
-
- l = MarshalProtoOrigStatus(&orig.Status, buf[:pos])
+ l = orig.Status.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -415,7 +490,7 @@ func MarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) int {
return len(buf) - pos
}
-func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
+func (orig *Span) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -441,7 +516,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos])
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -457,7 +532,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos])
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -485,7 +560,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigSpanID(&orig.ParentSpanId, buf[startPos:pos])
+ err = orig.ParentSpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -524,7 +599,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
return err
}
- orig.Kind = otlptrace.Span_SpanKind(num)
+ orig.Kind = SpanKind(num)
case 7:
if wireType != proto.WireTypeI64 {
@@ -560,8 +635,8 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -588,8 +663,8 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
return err
}
startPos := pos - length
- orig.Events = append(orig.Events, NewOrigSpan_Event())
- err = UnmarshalProtoOrigSpan_Event(orig.Events[len(orig.Events)-1], buf[startPos:pos])
+ orig.Events = append(orig.Events, NewSpanEvent())
+ err = orig.Events[len(orig.Events)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -616,8 +691,8 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
return err
}
startPos := pos - length
- orig.Links = append(orig.Links, NewOrigSpan_Link())
- err = UnmarshalProtoOrigSpan_Link(orig.Links[len(orig.Links)-1], buf[startPos:pos])
+ orig.Links = append(orig.Links, NewSpanLink())
+ err = orig.Links[len(orig.Links)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -645,7 +720,7 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigStatus(&orig.Status, buf[startPos:pos])
+ err = orig.Status.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -658,3 +733,41 @@ func UnmarshalProtoOrigSpan(orig *otlptrace.Span, buf []byte) error {
}
return nil
}
+
+func GenTestSpan() *Span {
+ orig := NewSpan()
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ orig.TraceState = "test_tracestate"
+ orig.ParentSpanId = *GenTestSpanID()
+ orig.Flags = uint32(13)
+ orig.Name = "test_name"
+ orig.Kind = SpanKind(13)
+ orig.StartTimeUnixNano = uint64(13)
+ orig.EndTimeUnixNano = uint64(13)
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.Events = []*SpanEvent{{}, GenTestSpanEvent()}
+ orig.DroppedEventsCount = uint32(13)
+ orig.Links = []*SpanLink{{}, GenTestSpanLink()}
+ orig.DroppedLinksCount = uint32(13)
+ orig.Status = *GenTestStatus()
+ return orig
+}
+
+func GenTestSpanPtrSlice() []*Span {
+ orig := make([]*Span, 5)
+ orig[0] = NewSpan()
+ orig[1] = GenTestSpan()
+ orig[2] = NewSpan()
+ orig[3] = GenTestSpan()
+ orig[4] = NewSpan()
+ return orig
+}
+
+func GenTestSpanSlice() []Span {
+ orig := make([]Span, 5)
+ orig[1] = *GenTestSpan()
+ orig[3] = *GenTestSpan()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go
new file mode 100644
index 000000000..7d7878700
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go
@@ -0,0 +1,367 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type SpanContext struct {
+ TraceID TraceID
+ SpanID SpanID
+ TraceFlags uint32
+ TraceState string
+ Remote bool
+}
+
+var (
+ protoPoolSpanContext = sync.Pool{
+ New: func() any {
+ return &SpanContext{}
+ },
+ }
+)
+
+func NewSpanContext() *SpanContext {
+ if !UseProtoPooling.IsEnabled() {
+ return &SpanContext{}
+ }
+ return protoPoolSpanContext.Get().(*SpanContext)
+}
+
+func DeleteSpanContext(orig *SpanContext, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteTraceID(&orig.TraceID, false)
+ DeleteSpanID(&orig.SpanID, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolSpanContext.Put(orig)
+ }
+}
+
+func CopySpanContext(dest, src *SpanContext) *SpanContext {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpanContext()
+ }
+ CopyTraceID(&dest.TraceID, &src.TraceID)
+
+ CopySpanID(&dest.SpanID, &src.SpanID)
+
+ dest.TraceFlags = src.TraceFlags
+
+ dest.TraceState = src.TraceState
+
+ dest.Remote = src.Remote
+
+ return dest
+}
+
+func CopySpanContextSlice(dest, src []SpanContext) []SpanContext {
+ var newDest []SpanContext
+ if cap(dest) < len(src) {
+ newDest = make([]SpanContext, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanContext(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpanContext(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySpanContextPtrSlice(dest, src []*SpanContext) []*SpanContext {
+ var newDest []*SpanContext
+ if cap(dest) < len(src) {
+ newDest = make([]*SpanContext, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanContext()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanContext(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanContext()
+ }
+ }
+ for i := range src {
+ CopySpanContext(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SpanContext) Reset() {
+ *orig = SpanContext{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SpanContext) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if !orig.TraceID.IsEmpty() {
+ dest.WriteObjectField("traceID")
+ orig.TraceID.MarshalJSON(dest)
+ }
+ if !orig.SpanID.IsEmpty() {
+ dest.WriteObjectField("spanID")
+ orig.SpanID.MarshalJSON(dest)
+ }
+ if orig.TraceFlags != uint32(0) {
+ dest.WriteObjectField("traceFlags")
+ dest.WriteUint32(orig.TraceFlags)
+ }
+ if orig.TraceState != "" {
+ dest.WriteObjectField("traceState")
+ dest.WriteString(orig.TraceState)
+ }
+ if orig.Remote != false {
+ dest.WriteObjectField("remote")
+ dest.WriteBool(orig.Remote)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SpanContext) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "traceID", "trace_id":
+
+ orig.TraceID.UnmarshalJSON(iter)
+ case "spanID", "span_id":
+
+ orig.SpanID.UnmarshalJSON(iter)
+ case "traceFlags", "trace_flags":
+ orig.TraceFlags = iter.ReadUint32()
+ case "traceState", "trace_state":
+ orig.TraceState = iter.ReadString()
+ case "remote":
+ orig.Remote = iter.ReadBool()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *SpanContext) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.TraceID.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanID.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.TraceFlags != 0 {
+ n += 5
+ }
+ l = len(orig.TraceState)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Remote {
+ n += 2
+ }
+ return n
+}
+
+func (orig *SpanContext) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.TraceID.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ l = orig.SpanID.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ if orig.TraceFlags != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.TraceFlags))
+ pos--
+ buf[pos] = 0x1d
+ }
+ l = len(orig.TraceState)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.TraceState)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ if orig.Remote {
+ pos--
+ if orig.Remote {
+ buf[pos] = 1
+ } else {
+ buf[pos] = 0
+ }
+ pos--
+ buf[pos] = 0x28
+ }
+ return len(buf) - pos
+}
+
+func (orig *SpanContext) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceID.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanID.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceFlags", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TraceFlags = uint32(num)
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.TraceState = string(buf[startPos:pos])
+
+ case 5:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Remote = num != 0
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSpanContext() *SpanContext {
+ orig := NewSpanContext()
+ orig.TraceID = *GenTestTraceID()
+ orig.SpanID = *GenTestSpanID()
+ orig.TraceFlags = uint32(13)
+ orig.TraceState = "test_tracestate"
+ orig.Remote = true
+ return orig
+}
+
+func GenTestSpanContextPtrSlice() []*SpanContext {
+ orig := make([]*SpanContext, 5)
+ orig[0] = NewSpanContext()
+ orig[1] = GenTestSpanContext()
+ orig[2] = NewSpanContext()
+ orig[3] = GenTestSpanContext()
+ orig[4] = NewSpanContext()
+ return orig
+}
+
+func GenTestSpanContextSlice() []SpanContext {
+ orig := make([]SpanContext, 5)
+ orig[1] = *GenTestSpanContext()
+ orig[3] = *GenTestSpanContext()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_event.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go
similarity index 54%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_event.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go
index 8931b6267..14c0fe162 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_event.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go
@@ -11,28 +11,35 @@ import (
"fmt"
"sync"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs. See OTLP for event definition.
+type SpanEvent struct {
+ TimeUnixNano uint64
+ Name string
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+}
+
var (
- protoPoolSpan_Event = sync.Pool{
+ protoPoolSpanEvent = sync.Pool{
New: func() any {
- return &otlptrace.Span_Event{}
+ return &SpanEvent{}
},
}
)
-func NewOrigSpan_Event() *otlptrace.Span_Event {
+func NewSpanEvent() *SpanEvent {
if !UseProtoPooling.IsEnabled() {
- return &otlptrace.Span_Event{}
+ return &SpanEvent{}
}
- return protoPoolSpan_Event.Get().(*otlptrace.Span_Event)
+ return protoPoolSpanEvent.Get().(*SpanEvent)
}
-func DeleteOrigSpan_Event(orig *otlptrace.Span_Event, nullable bool) {
+func DeleteSpanEvent(orig *SpanEvent, nullable bool) {
if orig == nil {
return
}
@@ -43,37 +50,93 @@ func DeleteOrigSpan_Event(orig *otlptrace.Span_Event, nullable bool) {
}
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
- protoPoolSpan_Event.Put(orig)
+ protoPoolSpanEvent.Put(orig)
}
}
-func CopyOrigSpan_Event(dest, src *otlptrace.Span_Event) {
+func CopySpanEvent(dest, src *SpanEvent) *SpanEvent {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpanEvent()
}
dest.TimeUnixNano = src.TimeUnixNano
+
dest.Name = src.Name
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ return dest
}
-func GenTestOrigSpan_Event() *otlptrace.Span_Event {
- orig := NewOrigSpan_Event()
- orig.TimeUnixNano = 1234567890
- orig.Name = "test_name"
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.DroppedAttributesCount = uint32(13)
- return orig
+func CopySpanEventSlice(dest, src []SpanEvent) []SpanEvent {
+ var newDest []SpanEvent
+ if cap(dest) < len(src) {
+ newDest = make([]SpanEvent, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanEvent(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpanEvent(&newDest[i], &src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, dest *json.Stream) {
+func CopySpanEventPtrSlice(dest, src []*SpanEvent) []*SpanEvent {
+ var newDest []*SpanEvent
+ if cap(dest) < len(src) {
+ newDest = make([]*SpanEvent, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanEvent()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanEvent(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanEvent()
+ }
+ }
+ for i := range src {
+ CopySpanEvent(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SpanEvent) Reset() {
+ *orig = SpanEvent{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SpanEvent) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
@@ -86,10 +149,10 @@ func MarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, dest *json.Stream) {
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -100,8 +163,8 @@ func MarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, dest *json.Stream) {
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigSpanEvent unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SpanEvent) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "timeUnixNano", "time_unix_nano":
@@ -110,8 +173,8 @@ func UnmarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, iter *json.Iterator
orig.Name = iter.ReadString()
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
@@ -122,7 +185,7 @@ func UnmarshalJSONOrigSpan_Event(orig *otlptrace.Span_Event, iter *json.Iterator
}
}
-func SizeProtoOrigSpan_Event(orig *otlptrace.Span_Event) int {
+func (orig *SpanEvent) SizeProto() int {
var n int
var l int
_ = l
@@ -134,7 +197,7 @@ func SizeProtoOrigSpan_Event(orig *otlptrace.Span_Event) int {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
@@ -143,7 +206,7 @@ func SizeProtoOrigSpan_Event(orig *otlptrace.Span_Event) int {
return n
}
-func MarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) int {
+func (orig *SpanEvent) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
@@ -162,7 +225,7 @@ func MarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) int {
buf[pos] = 0x12
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -176,7 +239,7 @@ func MarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) int {
return len(buf) - pos
}
-func UnmarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) error {
+func (orig *SpanEvent) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -225,8 +288,8 @@ func UnmarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) error
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -251,3 +314,29 @@ func UnmarshalProtoOrigSpan_Event(orig *otlptrace.Span_Event, buf []byte) error
}
return nil
}
+
+func GenTestSpanEvent() *SpanEvent {
+ orig := NewSpanEvent()
+ orig.TimeUnixNano = uint64(13)
+ orig.Name = "test_name"
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ return orig
+}
+
+func GenTestSpanEventPtrSlice() []*SpanEvent {
+ orig := make([]*SpanEvent, 5)
+ orig[0] = NewSpanEvent()
+ orig[1] = GenTestSpanEvent()
+ orig[2] = NewSpanEvent()
+ orig[3] = GenTestSpanEvent()
+ orig[4] = NewSpanEvent()
+ return orig
+}
+
+func GenTestSpanEventSlice() []SpanEvent {
+ orig := make([]SpanEvent, 5)
+ orig[1] = *GenTestSpanEvent()
+ orig[3] = *GenTestSpanEvent()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_link.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go
similarity index 54%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_link.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go
index 0adb3bf26..07c42640b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_link.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go
@@ -11,29 +11,38 @@ import (
"fmt"
"sync"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// SpanLink is a pointer from the current span to another span in the same trace or in a
+// different trace.
+// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
+type SpanLink struct {
+ TraceId TraceID
+ SpanId SpanID
+ TraceState string
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ Flags uint32
+}
+
var (
- protoPoolSpan_Link = sync.Pool{
+ protoPoolSpanLink = sync.Pool{
New: func() any {
- return &otlptrace.Span_Link{}
+ return &SpanLink{}
},
}
)
-func NewOrigSpan_Link() *otlptrace.Span_Link {
+func NewSpanLink() *SpanLink {
if !UseProtoPooling.IsEnabled() {
- return &otlptrace.Span_Link{}
+ return &SpanLink{}
}
- return protoPoolSpan_Link.Get().(*otlptrace.Span_Link)
+ return protoPoolSpanLink.Get().(*SpanLink)
}
-func DeleteOrigSpan_Link(orig *otlptrace.Span_Link, nullable bool) {
+func DeleteSpanLink(orig *SpanLink, nullable bool) {
if orig == nil {
return
}
@@ -43,52 +52,108 @@ func DeleteOrigSpan_Link(orig *otlptrace.Span_Link, nullable bool) {
return
}
- DeleteOrigTraceID(&orig.TraceId, false)
- DeleteOrigSpanID(&orig.SpanId, false)
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
- protoPoolSpan_Link.Put(orig)
+ protoPoolSpanLink.Put(orig)
}
}
-func CopyOrigSpan_Link(dest, src *otlptrace.Span_Link) {
+func CopySpanLink(dest, src *SpanLink) *SpanLink {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
}
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
- CopyOrigTraceState(&dest.TraceState, &src.TraceState)
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpanLink()
+ }
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ dest.TraceState = src.TraceState
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.DroppedAttributesCount = src.DroppedAttributesCount
+
dest.Flags = src.Flags
+
+ return dest
}
-func GenTestOrigSpan_Link() *otlptrace.Span_Link {
- orig := NewOrigSpan_Link()
- orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
- orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
- orig.TraceState = *GenTestOrigTraceState()
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.DroppedAttributesCount = uint32(13)
- orig.Flags = uint32(13)
- return orig
+func CopySpanLinkSlice(dest, src []SpanLink) []SpanLink {
+ var newDest []SpanLink
+ if cap(dest) < len(src) {
+ newDest = make([]SpanLink, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanLink(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpanLink(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySpanLinkPtrSlice(dest, src []*SpanLink) []*SpanLink {
+ var newDest []*SpanLink
+ if cap(dest) < len(src) {
+ newDest = make([]*SpanLink, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanLink()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanLink(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanLink()
+ }
+ }
+ for i := range src {
+ CopySpanLink(newDest[i], src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, dest *json.Stream) {
+func (orig *SpanLink) Reset() {
+ *orig = SpanLink{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SpanLink) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
- if orig.TraceId != data.TraceID([16]byte{}) {
+ if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
- MarshalJSONOrigTraceID(&orig.TraceId, dest)
+ orig.TraceId.MarshalJSON(dest)
}
- if orig.SpanId != data.SpanID([8]byte{}) {
+ if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
- MarshalJSONOrigSpanID(&orig.SpanId, dest)
+ orig.SpanId.MarshalJSON(dest)
}
if orig.TraceState != "" {
dest.WriteObjectField("traceState")
@@ -97,10 +162,10 @@ func MarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, dest *json.Stream) {
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -115,20 +180,22 @@ func MarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, dest *json.Stream) {
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigSpanLink unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SpanLink) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
- UnmarshalJSONOrigTraceID(&orig.TraceId, iter)
+
+ orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
- UnmarshalJSONOrigSpanID(&orig.SpanId, iter)
+
+ orig.SpanId.UnmarshalJSON(iter)
case "traceState", "trace_state":
orig.TraceState = iter.ReadString()
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
@@ -141,20 +208,20 @@ func UnmarshalJSONOrigSpan_Link(orig *otlptrace.Span_Link, iter *json.Iterator)
}
}
-func SizeProtoOrigSpan_Link(orig *otlptrace.Span_Link) int {
+func (orig *SpanLink) SizeProto() int {
var n int
var l int
_ = l
- l = SizeProtoOrigTraceID(&orig.TraceId)
+ l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
- l = SizeProtoOrigSpanID(&orig.SpanId)
+ l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.TraceState)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
@@ -166,18 +233,17 @@ func SizeProtoOrigSpan_Link(orig *otlptrace.Span_Link) int {
return n
}
-func MarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) int {
+func (orig *SpanLink) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
-
- l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos])
+ l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
- l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos])
+ l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -192,7 +258,7 @@ func MarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) int {
buf[pos] = 0x1a
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -212,7 +278,7 @@ func MarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) int {
return len(buf) - pos
}
-func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error {
+func (orig *SpanLink) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -238,7 +304,7 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos])
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -254,7 +320,7 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error {
}
startPos := pos - length
- err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos])
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -281,8 +347,8 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error {
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -319,3 +385,31 @@ func UnmarshalProtoOrigSpan_Link(orig *otlptrace.Span_Link, buf []byte) error {
}
return nil
}
+
+func GenTestSpanLink() *SpanLink {
+ orig := NewSpanLink()
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ orig.TraceState = "test_tracestate"
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.Flags = uint32(13)
+ return orig
+}
+
+func GenTestSpanLinkPtrSlice() []*SpanLink {
+ orig := make([]*SpanLink, 5)
+ orig[0] = NewSpanLink()
+ orig[1] = GenTestSpanLink()
+ orig[2] = NewSpanLink()
+ orig[3] = GenTestSpanLink()
+ orig[4] = NewSpanLink()
+ return orig
+}
+
+func GenTestSpanLinkSlice() []SpanLink {
+ orig := make([]SpanLink, 5)
+ orig[1] = *GenTestSpanLink()
+ orig[3] = *GenTestSpanLink()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go
new file mode 100644
index 000000000..a97599bfc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go
@@ -0,0 +1,261 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+
+// Stack represents a stack trace as a list of locations.
+type Stack struct {
+ LocationIndices []int32
+}
+
+var (
+ protoPoolStack = sync.Pool{
+ New: func() any {
+ return &Stack{}
+ },
+ }
+)
+
+func NewStack() *Stack {
+ if !UseProtoPooling.IsEnabled() {
+ return &Stack{}
+ }
+ return protoPoolStack.Get().(*Stack)
+}
+
+func DeleteStack(orig *Stack, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolStack.Put(orig)
+ }
+}
+
+func CopyStack(dest, src *Stack) *Stack {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewStack()
+ }
+ dest.LocationIndices = append(dest.LocationIndices[:0], src.LocationIndices...)
+
+ return dest
+}
+
+func CopyStackSlice(dest, src []Stack) []Stack {
+ var newDest []Stack
+ if cap(dest) < len(src) {
+ newDest = make([]Stack, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStack(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyStack(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyStackPtrSlice(dest, src []*Stack) []*Stack {
+ var newDest []*Stack
+ if cap(dest) < len(src) {
+ newDest = make([]*Stack, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStack()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStack(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStack()
+ }
+ }
+ for i := range src {
+ CopyStack(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Stack) Reset() {
+ *orig = Stack{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Stack) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.LocationIndices) > 0 {
+ dest.WriteObjectField("locationIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.LocationIndices[0])
+ for i := 1; i < len(orig.LocationIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.LocationIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Stack) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "locationIndices", "location_indices":
+ for iter.ReadArray() {
+ orig.LocationIndices = append(orig.LocationIndices, iter.ReadInt32())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Stack) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if len(orig.LocationIndices) > 0 {
+ l = 0
+ for _, e := range orig.LocationIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Stack) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.LocationIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.LocationIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *Stack) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+ case 1:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.LocationIndices = append(orig.LocationIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field LocationIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.LocationIndices = append(orig.LocationIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestStack() *Stack {
+ orig := NewStack()
+ orig.LocationIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestStackPtrSlice() []*Stack {
+ orig := make([]*Stack, 5)
+ orig[0] = NewStack()
+ orig[1] = GenTestStack()
+ orig[2] = NewStack()
+ orig[3] = GenTestStack()
+ orig[4] = NewStack()
+ return orig
+}
+
+func GenTestStackSlice() []Stack {
+ orig := make([]Stack, 5)
+ orig[1] = *GenTestStack()
+ orig[3] = *GenTestStack()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go
new file mode 100644
index 000000000..c2f2d3775
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go
@@ -0,0 +1,260 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Status is an optional final status for this span. Semantically, when Status was not
+// set, that means the span ended without errors and to assume Status.Ok (code = 0).
+type Status struct {
+ Message string
+ Code StatusCode
+}
+
+var (
+ protoPoolStatus = sync.Pool{
+ New: func() any {
+ return &Status{}
+ },
+ }
+)
+
+func NewStatus() *Status {
+ if !UseProtoPooling.IsEnabled() {
+ return &Status{}
+ }
+ return protoPoolStatus.Get().(*Status)
+}
+
+func DeleteStatus(orig *Status, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolStatus.Put(orig)
+ }
+}
+
+func CopyStatus(dest, src *Status) *Status {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewStatus()
+ }
+ dest.Message = src.Message
+
+ dest.Code = src.Code
+
+ return dest
+}
+
+func CopyStatusSlice(dest, src []Status) []Status {
+ var newDest []Status
+ if cap(dest) < len(src) {
+ newDest = make([]Status, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStatus(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyStatus(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyStatusPtrSlice(dest, src []*Status) []*Status {
+ var newDest []*Status
+ if cap(dest) < len(src) {
+ newDest = make([]*Status, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStatus()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStatus(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStatus()
+ }
+ }
+ for i := range src {
+ CopyStatus(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Status) Reset() {
+ *orig = Status{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Status) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Message != "" {
+ dest.WriteObjectField("message")
+ dest.WriteString(orig.Message)
+ }
+
+ if int32(orig.Code) != 0 {
+ dest.WriteObjectField("code")
+ dest.WriteInt32(int32(orig.Code))
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Status) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "message":
+ orig.Message = iter.ReadString()
+ case "code":
+ orig.Code = StatusCode(iter.ReadEnumValue(StatusCode_value))
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Status) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Message)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Code != 0 {
+ n += 1 + proto.Sov(uint64(orig.Code))
+ }
+ return n
+}
+
+func (orig *Status) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Message)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Message)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ if orig.Code != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Code))
+ pos--
+ buf[pos] = 0x18
+ }
+ return len(buf) - pos
+}
+
+func (orig *Status) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Message = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Code = StatusCode(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestStatus() *Status {
+ orig := NewStatus()
+ orig.Message = "test_message"
+ orig.Code = StatusCode(13)
+ return orig
+}
+
+func GenTestStatusPtrSlice() []*Status {
+ orig := make([]*Status, 5)
+ orig[0] = NewStatus()
+ orig[1] = GenTestStatus()
+ orig[2] = NewStatus()
+ orig[3] = GenTestStatus()
+ orig[4] = NewStatus()
+ return orig
+}
+
+func GenTestStatusSlice() []Status {
+ orig := make([]Status, 5)
+ orig[1] = *GenTestStatus()
+ orig[3] = *GenTestStatus()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go
similarity index 50%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sum.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go
index fcc015ad0..67be5d00f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sum.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go
@@ -10,27 +10,33 @@ import (
"fmt"
"sync"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval.
+type Sum struct {
+ DataPoints []*NumberDataPoint
+ AggregationTemporality AggregationTemporality
+ IsMonotonic bool
+}
+
var (
protoPoolSum = sync.Pool{
New: func() any {
- return &otlpmetrics.Sum{}
+ return &Sum{}
},
}
)
-func NewOrigSum() *otlpmetrics.Sum {
+func NewSum() *Sum {
if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.Sum{}
+ return &Sum{}
}
- return protoPoolSum.Get().(*otlpmetrics.Sum)
+ return protoPoolSum.Get().(*Sum)
}
-func DeleteOrigSum(orig *otlpmetrics.Sum, nullable bool) {
+func DeleteSum(orig *Sum, nullable bool) {
if orig == nil {
return
}
@@ -41,7 +47,7 @@ func DeleteOrigSum(orig *otlpmetrics.Sum, nullable bool) {
}
for i := range orig.DataPoints {
- DeleteOrigNumberDataPoint(orig.DataPoints[i], true)
+ DeleteNumberDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
@@ -50,34 +56,90 @@ func DeleteOrigSum(orig *otlpmetrics.Sum, nullable bool) {
}
}
-func CopyOrigSum(dest, src *otlpmetrics.Sum) {
+func CopySum(dest, src *Sum) *Sum {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
}
- dest.DataPoints = CopyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints)
+
+ if dest == nil {
+ dest = NewSum()
+ }
+ dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
dest.AggregationTemporality = src.AggregationTemporality
+
dest.IsMonotonic = src.IsMonotonic
+
+ return dest
}
-func GenTestOrigSum() *otlpmetrics.Sum {
- orig := NewOrigSum()
- orig.DataPoints = GenerateOrigTestNumberDataPointSlice()
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1)
- orig.IsMonotonic = true
- return orig
+func CopySumSlice(dest, src []Sum) []Sum {
+ var newDest []Sum
+ if cap(dest) < len(src) {
+ newDest = make([]Sum, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSum(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySum(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySumPtrSlice(dest, src []*Sum) []*Sum {
+ var newDest []*Sum
+ if cap(dest) < len(src) {
+ newDest = make([]*Sum, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSum()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSum(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSum()
+ }
+ }
+ for i := range src {
+ CopySum(newDest[i], src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSum(orig *otlpmetrics.Sum, dest *json.Stream) {
+func (orig *Sum) Reset() {
+ *orig = Sum{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Sum) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
- MarshalJSONOrigNumberDataPoint(orig.DataPoints[0], dest)
+ orig.DataPoints[0].MarshalJSON(dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
- MarshalJSONOrigNumberDataPoint(orig.DataPoints[i], dest)
+ orig.DataPoints[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -93,18 +155,18 @@ func MarshalJSONOrigSum(orig *otlpmetrics.Sum, dest *json.Stream) {
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigSum unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSum(orig *otlpmetrics.Sum, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Sum) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
- orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint())
- UnmarshalJSONOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
}
case "aggregationTemporality", "aggregation_temporality":
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value))
+ orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
case "isMonotonic", "is_monotonic":
orig.IsMonotonic = iter.ReadBool()
default:
@@ -113,12 +175,12 @@ func UnmarshalJSONOrigSum(orig *otlpmetrics.Sum, iter *json.Iterator) {
}
}
-func SizeProtoOrigSum(orig *otlpmetrics.Sum) int {
+func (orig *Sum) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
- l = SizeProtoOrigNumberDataPoint(orig.DataPoints[i])
+ l = orig.DataPoints[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.AggregationTemporality != 0 {
@@ -130,12 +192,12 @@ func SizeProtoOrigSum(orig *otlpmetrics.Sum) int {
return n
}
-func MarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) int {
+func (orig *Sum) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
- l = MarshalProtoOrigNumberDataPoint(orig.DataPoints[i], buf[:pos])
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -159,7 +221,7 @@ func MarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) int {
return len(buf) - pos
}
-func UnmarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) error {
+func (orig *Sum) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -184,8 +246,8 @@ func UnmarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) error {
return err
}
startPos := pos - length
- orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint())
- err = UnmarshalProtoOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -200,7 +262,7 @@ func UnmarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) error {
return err
}
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num)
+ orig.AggregationTemporality = AggregationTemporality(num)
case 3:
if wireType != proto.WireTypeVarint {
@@ -222,3 +284,28 @@ func UnmarshalProtoOrigSum(orig *otlpmetrics.Sum, buf []byte) error {
}
return nil
}
+
+func GenTestSum() *Sum {
+ orig := NewSum()
+ orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()}
+ orig.AggregationTemporality = AggregationTemporality(13)
+ orig.IsMonotonic = true
+ return orig
+}
+
+func GenTestSumPtrSlice() []*Sum {
+ orig := make([]*Sum, 5)
+ orig[0] = NewSum()
+ orig[1] = GenTestSum()
+ orig[2] = NewSum()
+ orig[3] = GenTestSum()
+ orig[4] = NewSum()
+ return orig
+}
+
+func GenTestSumSlice() []Sum {
+ orig := make([]Sum, 5)
+ orig[1] = *GenTestSum()
+ orig[3] = *GenTestSum()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go
new file mode 100644
index 000000000..8d40bd166
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval.
+type Summary struct {
+ DataPoints []*SummaryDataPoint
+}
+
+var (
+ protoPoolSummary = sync.Pool{
+ New: func() any {
+ return &Summary{}
+ },
+ }
+)
+
+func NewSummary() *Summary {
+ if !UseProtoPooling.IsEnabled() {
+ return &Summary{}
+ }
+ return protoPoolSummary.Get().(*Summary)
+}
+
+func DeleteSummary(orig *Summary, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteSummaryDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSummary.Put(orig)
+ }
+}
+
+func CopySummary(dest, src *Summary) *Summary {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSummary()
+ }
+ dest.DataPoints = CopySummaryDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ return dest
+}
+
+func CopySummarySlice(dest, src []Summary) []Summary {
+ var newDest []Summary
+ if cap(dest) < len(src) {
+ newDest = make([]Summary, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummary(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySummary(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySummaryPtrSlice(dest, src []*Summary) []*Summary {
+ var newDest []*Summary
+ if cap(dest) < len(src) {
+ newDest = make([]*Summary, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummary()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummary(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummary()
+ }
+ }
+ for i := range src {
+ CopySummary(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Summary) Reset() {
+ *orig = Summary{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Summary) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Summary) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Summary) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Summary) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *Summary) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSummary() *Summary {
+ orig := NewSummary()
+ orig.DataPoints = []*SummaryDataPoint{{}, GenTestSummaryDataPoint()}
+ return orig
+}
+
+func GenTestSummaryPtrSlice() []*Summary {
+ orig := make([]*Summary, 5)
+ orig[0] = NewSummary()
+ orig[1] = GenTestSummary()
+ orig[2] = NewSummary()
+ orig[3] = GenTestSummary()
+ orig[4] = NewSummary()
+ return orig
+}
+
+func GenTestSummarySlice() []Summary {
+ orig := make([]Summary, 5)
+ orig[1] = *GenTestSummary()
+ orig[3] = *GenTestSummary()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go
similarity index 56%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go
index 35b413201..bf30987a6 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go
@@ -12,28 +12,37 @@ import (
"math"
"sync"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values.
+type SummaryDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Count uint64
+ Sum float64
+ QuantileValues []*SummaryDataPointValueAtQuantile
+ Flags uint32
+}
+
var (
protoPoolSummaryDataPoint = sync.Pool{
New: func() any {
- return &otlpmetrics.SummaryDataPoint{}
+ return &SummaryDataPoint{}
},
}
)
-func NewOrigSummaryDataPoint() *otlpmetrics.SummaryDataPoint {
+func NewSummaryDataPoint() *SummaryDataPoint {
if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.SummaryDataPoint{}
+ return &SummaryDataPoint{}
}
- return protoPoolSummaryDataPoint.Get().(*otlpmetrics.SummaryDataPoint)
+ return protoPoolSummaryDataPoint.Get().(*SummaryDataPoint)
}
-func DeleteOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, nullable bool) {
+func DeleteSummaryDataPoint(orig *SummaryDataPoint, nullable bool) {
if orig == nil {
return
}
@@ -44,10 +53,10 @@ func DeleteOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, nullable boo
}
for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
+ DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.QuantileValues {
- DeleteOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], true)
+ DeleteSummaryDataPointValueAtQuantile(orig.QuantileValues[i], true)
}
orig.Reset()
@@ -56,42 +65,98 @@ func DeleteOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, nullable boo
}
}
-func CopyOrigSummaryDataPoint(dest, src *otlpmetrics.SummaryDataPoint) {
+func CopySummaryDataPoint(dest, src *SummaryDataPoint) *SummaryDataPoint {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
}
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
+
+ if dest == nil {
+ dest = NewSummaryDataPoint()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
dest.StartTimeUnixNano = src.StartTimeUnixNano
+
dest.TimeUnixNano = src.TimeUnixNano
+
dest.Count = src.Count
+
dest.Sum = src.Sum
- dest.QuantileValues = CopyOrigSummaryDataPoint_ValueAtQuantileSlice(dest.QuantileValues, src.QuantileValues)
+
+ dest.QuantileValues = CopySummaryDataPointValueAtQuantilePtrSlice(dest.QuantileValues, src.QuantileValues)
+
dest.Flags = src.Flags
+
+ return dest
}
-func GenTestOrigSummaryDataPoint() *otlpmetrics.SummaryDataPoint {
- orig := NewOrigSummaryDataPoint()
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.StartTimeUnixNano = 1234567890
- orig.TimeUnixNano = 1234567890
- orig.Count = uint64(13)
- orig.Sum = float64(3.1415926)
- orig.QuantileValues = GenerateOrigTestSummaryDataPoint_ValueAtQuantileSlice()
- orig.Flags = 1
- return orig
+func CopySummaryDataPointSlice(dest, src []SummaryDataPoint) []SummaryDataPoint {
+ var newDest []SummaryDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]SummaryDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySummaryDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, dest *json.Stream) {
+func CopySummaryDataPointPtrSlice(dest, src []*SummaryDataPoint) []*SummaryDataPoint {
+ var newDest []*SummaryDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*SummaryDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPoint()
+ }
+ }
+ for i := range src {
+ CopySummaryDataPoint(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SummaryDataPoint) Reset() {
+ *orig = SummaryDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SummaryDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
+ orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
+ orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -114,10 +179,10 @@ func MarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, dest *j
if len(orig.QuantileValues) > 0 {
dest.WriteObjectField("quantileValues")
dest.WriteArrayStart()
- MarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[0], dest)
+ orig.QuantileValues[0].MarshalJSON(dest)
for i := 1; i < len(orig.QuantileValues); i++ {
dest.WriteMore()
- MarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], dest)
+ orig.QuantileValues[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
@@ -128,14 +193,14 @@ func MarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, dest *j
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigSummaryDataPoint unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SummaryDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
@@ -148,8 +213,8 @@ func UnmarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, iter
orig.Sum = iter.ReadFloat64()
case "quantileValues", "quantile_values":
for iter.ReadArray() {
- orig.QuantileValues = append(orig.QuantileValues, NewOrigSummaryDataPoint_ValueAtQuantile())
- UnmarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[len(orig.QuantileValues)-1], iter)
+ orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile())
+ orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalJSON(iter)
}
case "flags":
@@ -160,12 +225,12 @@ func UnmarshalJSONOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, iter
}
}
-func SizeProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint) int {
+func (orig *SummaryDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
+ l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
@@ -181,7 +246,7 @@ func SizeProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint) int {
n += 9
}
for i := range orig.QuantileValues {
- l = SizeProtoOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i])
+ l = orig.QuantileValues[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
@@ -190,12 +255,12 @@ func SizeProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint) int {
return n
}
-func MarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []byte) int {
+func (orig *SummaryDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -226,7 +291,7 @@ func MarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []
buf[pos] = 0x29
}
for i := len(orig.QuantileValues) - 1; i >= 0; i-- {
- l = MarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], buf[:pos])
+ l = orig.QuantileValues[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
@@ -240,7 +305,7 @@ func MarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []
return len(buf) - pos
}
-func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []byte) error {
+func (orig *SummaryDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -265,8 +330,8 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf
return err
}
startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -329,8 +394,8 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf
return err
}
startPos := pos - length
- orig.QuantileValues = append(orig.QuantileValues, NewOrigSummaryDataPoint_ValueAtQuantile())
- err = UnmarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[len(orig.QuantileValues)-1], buf[startPos:pos])
+ orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile())
+ err = orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
@@ -355,3 +420,32 @@ func UnmarshalProtoOrigSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf
}
return nil
}
+
+func GenTestSummaryDataPoint() *SummaryDataPoint {
+ orig := NewSummaryDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Count = uint64(13)
+ orig.Sum = float64(3.1415926)
+ orig.QuantileValues = []*SummaryDataPointValueAtQuantile{{}, GenTestSummaryDataPointValueAtQuantile()}
+ orig.Flags = uint32(13)
+ return orig
+}
+
+func GenTestSummaryDataPointPtrSlice() []*SummaryDataPoint {
+ orig := make([]*SummaryDataPoint, 5)
+ orig[0] = NewSummaryDataPoint()
+ orig[1] = GenTestSummaryDataPoint()
+ orig[2] = NewSummaryDataPoint()
+ orig[3] = GenTestSummaryDataPoint()
+ orig[4] = NewSummaryDataPoint()
+ return orig
+}
+
+func GenTestSummaryDataPointSlice() []SummaryDataPoint {
+ orig := make([]SummaryDataPoint, 5)
+ orig[1] = *GenTestSummaryDataPoint()
+ orig[3] = *GenTestSummaryDataPoint()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go
new file mode 100644
index 000000000..ae92399aa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point.
+type SummaryDataPointValueAtQuantile struct {
+ Quantile float64
+ Value float64
+}
+
+var (
+ protoPoolSummaryDataPointValueAtQuantile = sync.Pool{
+ New: func() any {
+ return &SummaryDataPointValueAtQuantile{}
+ },
+ }
+)
+
+func NewSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile {
+ if !UseProtoPooling.IsEnabled() {
+ return &SummaryDataPointValueAtQuantile{}
+ }
+ return protoPoolSummaryDataPointValueAtQuantile.Get().(*SummaryDataPointValueAtQuantile)
+}
+
+func DeleteSummaryDataPointValueAtQuantile(orig *SummaryDataPointValueAtQuantile, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSummaryDataPointValueAtQuantile.Put(orig)
+ }
+}
+
+func CopySummaryDataPointValueAtQuantile(dest, src *SummaryDataPointValueAtQuantile) *SummaryDataPointValueAtQuantile {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSummaryDataPointValueAtQuantile()
+ }
+ dest.Quantile = src.Quantile
+
+ dest.Value = src.Value
+
+ return dest
+}
+
+func CopySummaryDataPointValueAtQuantileSlice(dest, src []SummaryDataPointValueAtQuantile) []SummaryDataPointValueAtQuantile {
+ var newDest []SummaryDataPointValueAtQuantile
+ if cap(dest) < len(src) {
+ newDest = make([]SummaryDataPointValueAtQuantile, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPointValueAtQuantile(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySummaryDataPointValueAtQuantile(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySummaryDataPointValueAtQuantilePtrSlice(dest, src []*SummaryDataPointValueAtQuantile) []*SummaryDataPointValueAtQuantile {
+ var newDest []*SummaryDataPointValueAtQuantile
+ if cap(dest) < len(src) {
+ newDest = make([]*SummaryDataPointValueAtQuantile, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPointValueAtQuantile()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPointValueAtQuantile(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPointValueAtQuantile()
+ }
+ }
+ for i := range src {
+ CopySummaryDataPointValueAtQuantile(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SummaryDataPointValueAtQuantile) Reset() {
+ *orig = SummaryDataPointValueAtQuantile{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SummaryDataPointValueAtQuantile) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Quantile != float64(0) {
+ dest.WriteObjectField("quantile")
+ dest.WriteFloat64(orig.Quantile)
+ }
+ if orig.Value != float64(0) {
+ dest.WriteObjectField("value")
+ dest.WriteFloat64(orig.Value)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SummaryDataPointValueAtQuantile) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "quantile":
+ orig.Quantile = iter.ReadFloat64()
+ case "value":
+ orig.Value = iter.ReadFloat64()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *SummaryDataPointValueAtQuantile) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.Quantile != 0 {
+ n += 9
+ }
+ if orig.Value != 0 {
+ n += 9
+ }
+ return n
+}
+
+func (orig *SummaryDataPointValueAtQuantile) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.Quantile != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Quantile))
+ pos--
+ buf[pos] = 0x9
+ }
+ if orig.Value != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Value))
+ pos--
+ buf[pos] = 0x11
+ }
+ return len(buf) - pos
+}
+
+func (orig *SummaryDataPointValueAtQuantile) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Quantile = math.Float64frombits(num)
+
+ case 2:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Value = math.Float64frombits(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile {
+ orig := NewSummaryDataPointValueAtQuantile()
+ orig.Quantile = float64(3.1415926)
+ orig.Value = float64(3.1415926)
+ return orig
+}
+
+func GenTestSummaryDataPointValueAtQuantilePtrSlice() []*SummaryDataPointValueAtQuantile {
+ orig := make([]*SummaryDataPointValueAtQuantile, 5)
+ orig[0] = NewSummaryDataPointValueAtQuantile()
+ orig[1] = GenTestSummaryDataPointValueAtQuantile()
+ orig[2] = NewSummaryDataPointValueAtQuantile()
+ orig[3] = GenTestSummaryDataPointValueAtQuantile()
+ orig[4] = NewSummaryDataPointValueAtQuantile()
+ return orig
+}
+
+func GenTestSummaryDataPointValueAtQuantileSlice() []SummaryDataPointValueAtQuantile {
+ orig := make([]SummaryDataPointValueAtQuantile, 5)
+ orig[1] = *GenTestSummaryDataPointValueAtQuantile()
+ orig[3] = *GenTestSummaryDataPointValueAtQuantile()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go
new file mode 100644
index 000000000..d2590bdbb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go
@@ -0,0 +1,295 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type TCPAddr struct {
+ IP []byte
+ Port int64
+ Zone string
+}
+
+var (
+ protoPoolTCPAddr = sync.Pool{
+ New: func() any {
+ return &TCPAddr{}
+ },
+ }
+)
+
+func NewTCPAddr() *TCPAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &TCPAddr{}
+ }
+ return protoPoolTCPAddr.Get().(*TCPAddr)
+}
+
+func DeleteTCPAddr(orig *TCPAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolTCPAddr.Put(orig)
+ }
+}
+
+func CopyTCPAddr(dest, src *TCPAddr) *TCPAddr {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewTCPAddr()
+ }
+ dest.IP = src.IP
+
+ dest.Port = src.Port
+
+ dest.Zone = src.Zone
+
+ return dest
+}
+
+func CopyTCPAddrSlice(dest, src []TCPAddr) []TCPAddr {
+ var newDest []TCPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]TCPAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTCPAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyTCPAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyTCPAddrPtrSlice(dest, src []*TCPAddr) []*TCPAddr {
+ var newDest []*TCPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*TCPAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTCPAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTCPAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTCPAddr()
+ }
+ }
+ for i := range src {
+ CopyTCPAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *TCPAddr) Reset() {
+ *orig = TCPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TCPAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+
+ if len(orig.IP) > 0 {
+ dest.WriteObjectField("iP")
+ dest.WriteBytes(orig.IP)
+ }
+ if orig.Port != int64(0) {
+ dest.WriteObjectField("port")
+ dest.WriteInt64(orig.Port)
+ }
+ if orig.Zone != "" {
+ dest.WriteObjectField("zone")
+ dest.WriteString(orig.Zone)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *TCPAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "iP":
+ orig.IP = iter.ReadBytes()
+ case "port":
+ orig.Port = iter.ReadInt64()
+ case "zone":
+ orig.Zone = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *TCPAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Port != 0 {
+ n += 1 + proto.Sov(uint64(orig.Port))
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *TCPAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.IP)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.Port != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Port))
+ pos--
+ buf[pos] = 0x10
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Zone)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *TCPAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.IP = make([]byte, length)
+ copy(orig.IP, buf[startPos:pos])
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Port = int64(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Zone = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestTCPAddr() *TCPAddr {
+ orig := NewTCPAddr()
+ orig.IP = []byte{1, 2, 3}
+ orig.Port = int64(13)
+ orig.Zone = "test_zone"
+ return orig
+}
+
+func GenTestTCPAddrPtrSlice() []*TCPAddr {
+ orig := make([]*TCPAddr, 5)
+ orig[0] = NewTCPAddr()
+ orig[1] = GenTestTCPAddr()
+ orig[2] = NewTCPAddr()
+ orig[3] = GenTestTCPAddr()
+ orig[4] = NewTCPAddr()
+ return orig
+}
+
+func GenTestTCPAddrSlice() []TCPAddr {
+ orig := make([]TCPAddr, 5)
+ orig[1] = *GenTestTCPAddr()
+ orig[3] = *GenTestTCPAddr()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go
new file mode 100644
index 000000000..f63549f1c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go
@@ -0,0 +1,247 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// TracesData represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do not
+// implement the OTLP protocol.
+type TracesData struct {
+ ResourceSpans []*ResourceSpans
+}
+
+var (
+ protoPoolTracesData = sync.Pool{
+ New: func() any {
+ return &TracesData{}
+ },
+ }
+)
+
+func NewTracesData() *TracesData {
+ if !UseProtoPooling.IsEnabled() {
+ return &TracesData{}
+ }
+ return protoPoolTracesData.Get().(*TracesData)
+}
+
+func DeleteTracesData(orig *TracesData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceSpans {
+ DeleteResourceSpans(orig.ResourceSpans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolTracesData.Put(orig)
+ }
+}
+
+func CopyTracesData(dest, src *TracesData) *TracesData {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewTracesData()
+ }
+ dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans)
+
+ return dest
+}
+
+func CopyTracesDataSlice(dest, src []TracesData) []TracesData {
+ var newDest []TracesData
+ if cap(dest) < len(src) {
+ newDest = make([]TracesData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyTracesData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyTracesDataPtrSlice(dest, src []*TracesData) []*TracesData {
+ var newDest []*TracesData
+ if cap(dest) < len(src) {
+ newDest = make([]*TracesData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesData()
+ }
+ }
+ for i := range src {
+ CopyTracesData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *TracesData) Reset() {
+ *orig = TracesData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TracesData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceSpans) > 0 {
+ dest.WriteObjectField("resourceSpans")
+ dest.WriteArrayStart()
+ orig.ResourceSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceSpans); i++ {
+ dest.WriteMore()
+ orig.ResourceSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *TracesData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceSpans", "resource_spans":
+ for iter.ReadArray() {
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *TracesData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceSpans {
+ l = orig.ResourceSpans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *TracesData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
+ l = orig.ResourceSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *TracesData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestTracesData() *TracesData {
+ orig := NewTracesData()
+ orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()}
+ return orig
+}
+
+func GenTestTracesDataPtrSlice() []*TracesData {
+ orig := make([]*TracesData, 5)
+ orig[0] = NewTracesData()
+ orig[1] = GenTestTracesData()
+ orig[2] = NewTracesData()
+ orig[3] = GenTestTracesData()
+ orig[4] = NewTracesData()
+ return orig
+}
+
+func GenTestTracesDataSlice() []TracesData {
+ orig := make([]TracesData, 5)
+ orig[1] = *GenTestTracesData()
+ orig[3] = *GenTestTracesData()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go
new file mode 100644
index 000000000..d3e3fd563
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type TracesRequest struct {
+ RequestContext *RequestContext
+ TracesData TracesData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolTracesRequest = sync.Pool{
+ New: func() any {
+ return &TracesRequest{}
+ },
+ }
+)
+
+func NewTracesRequest() *TracesRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &TracesRequest{}
+ }
+ return protoPoolTracesRequest.Get().(*TracesRequest)
+}
+
+func DeleteTracesRequest(orig *TracesRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteTracesData(&orig.TracesData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolTracesRequest.Put(orig)
+ }
+}
+
+func CopyTracesRequest(dest, src *TracesRequest) *TracesRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewTracesRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyTracesData(&dest.TracesData, &src.TracesData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyTracesRequestSlice(dest, src []TracesRequest) []TracesRequest {
+ var newDest []TracesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]TracesRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyTracesRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyTracesRequestPtrSlice(dest, src []*TracesRequest) []*TracesRequest {
+ var newDest []*TracesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*TracesRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesRequest()
+ }
+ }
+ for i := range src {
+ CopyTracesRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *TracesRequest) Reset() {
+ *orig = TracesRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TracesRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("tracesData")
+ orig.TracesData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *TracesRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "tracesData", "traces_data":
+
+ orig.TracesData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *TracesRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.TracesData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *TracesRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.TracesData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *TracesRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TracesData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TracesData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestTracesRequest() *TracesRequest {
+ orig := NewTracesRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.TracesData = *GenTestTracesData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestTracesRequestPtrSlice() []*TracesRequest {
+ orig := make([]*TracesRequest, 5)
+ orig[0] = NewTracesRequest()
+ orig[1] = GenTestTracesRequest()
+ orig[2] = NewTracesRequest()
+ orig[3] = GenTestTracesRequest()
+ orig[4] = NewTracesRequest()
+ return orig
+}
+
+func GenTestTracesRequestSlice() []TracesRequest {
+ orig := make([]TracesRequest, 5)
+ orig[1] = *GenTestTracesRequest()
+ orig[3] = *GenTestTracesRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go
new file mode 100644
index 000000000..f197afd19
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go
@@ -0,0 +1,295 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type UDPAddr struct {
+ IP []byte
+ Port int64
+ Zone string
+}
+
+var (
+ protoPoolUDPAddr = sync.Pool{
+ New: func() any {
+ return &UDPAddr{}
+ },
+ }
+)
+
+func NewUDPAddr() *UDPAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &UDPAddr{}
+ }
+ return protoPoolUDPAddr.Get().(*UDPAddr)
+}
+
+func DeleteUDPAddr(orig *UDPAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolUDPAddr.Put(orig)
+ }
+}
+
+func CopyUDPAddr(dest, src *UDPAddr) *UDPAddr {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewUDPAddr()
+ }
+ dest.IP = src.IP
+
+ dest.Port = src.Port
+
+ dest.Zone = src.Zone
+
+ return dest
+}
+
+func CopyUDPAddrSlice(dest, src []UDPAddr) []UDPAddr {
+ var newDest []UDPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]UDPAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUDPAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyUDPAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyUDPAddrPtrSlice(dest, src []*UDPAddr) []*UDPAddr {
+ var newDest []*UDPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*UDPAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUDPAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUDPAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUDPAddr()
+ }
+ }
+ for i := range src {
+ CopyUDPAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *UDPAddr) Reset() {
+ *orig = UDPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *UDPAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+
+ if len(orig.IP) > 0 {
+ dest.WriteObjectField("iP")
+ dest.WriteBytes(orig.IP)
+ }
+ if orig.Port != int64(0) {
+ dest.WriteObjectField("port")
+ dest.WriteInt64(orig.Port)
+ }
+ if orig.Zone != "" {
+ dest.WriteObjectField("zone")
+ dest.WriteString(orig.Zone)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *UDPAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "iP":
+ orig.IP = iter.ReadBytes()
+ case "port":
+ orig.Port = iter.ReadInt64()
+ case "zone":
+ orig.Zone = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *UDPAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Port != 0 {
+ n += 1 + proto.Sov(uint64(orig.Port))
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *UDPAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.IP)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.Port != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Port))
+ pos--
+ buf[pos] = 0x10
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Zone)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *UDPAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.IP = make([]byte, length)
+ copy(orig.IP, buf[startPos:pos])
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Port = int64(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Zone = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestUDPAddr() *UDPAddr {
+ orig := NewUDPAddr()
+ orig.IP = []byte{1, 2, 3}
+ orig.Port = int64(13)
+ orig.Zone = "test_zone"
+ return orig
+}
+
+func GenTestUDPAddrPtrSlice() []*UDPAddr {
+ orig := make([]*UDPAddr, 5)
+ orig[0] = NewUDPAddr()
+ orig[1] = GenTestUDPAddr()
+ orig[2] = NewUDPAddr()
+ orig[3] = GenTestUDPAddr()
+ orig[4] = NewUDPAddr()
+ return orig
+}
+
+func GenTestUDPAddrSlice() []UDPAddr {
+ orig := make([]UDPAddr, 5)
+ orig[1] = *GenTestUDPAddr()
+ orig[3] = *GenTestUDPAddr()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go
new file mode 100644
index 000000000..b7d25686c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go
@@ -0,0 +1,261 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type UnixAddr struct {
+ Name string
+ Net string
+}
+
+var (
+ protoPoolUnixAddr = sync.Pool{
+ New: func() any {
+ return &UnixAddr{}
+ },
+ }
+)
+
+func NewUnixAddr() *UnixAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &UnixAddr{}
+ }
+ return protoPoolUnixAddr.Get().(*UnixAddr)
+}
+
+func DeleteUnixAddr(orig *UnixAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolUnixAddr.Put(orig)
+ }
+}
+
+func CopyUnixAddr(dest, src *UnixAddr) *UnixAddr {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewUnixAddr()
+ }
+ dest.Name = src.Name
+
+ dest.Net = src.Net
+
+ return dest
+}
+
+func CopyUnixAddrSlice(dest, src []UnixAddr) []UnixAddr {
+ var newDest []UnixAddr
+ if cap(dest) < len(src) {
+ newDest = make([]UnixAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUnixAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyUnixAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyUnixAddrPtrSlice(dest, src []*UnixAddr) []*UnixAddr {
+ var newDest []*UnixAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*UnixAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUnixAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUnixAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUnixAddr()
+ }
+ }
+ for i := range src {
+ CopyUnixAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *UnixAddr) Reset() {
+ *orig = UnixAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *UnixAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+ if orig.Net != "" {
+ dest.WriteObjectField("net")
+ dest.WriteString(orig.Net)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *UnixAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "name":
+ orig.Name = iter.ReadString()
+ case "net":
+ orig.Net = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *UnixAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Net)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *UnixAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Net)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Net)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *UnixAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Net", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Net = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestUnixAddr() *UnixAddr {
+ orig := NewUnixAddr()
+ orig.Name = "test_name"
+ orig.Net = "test_net"
+ return orig
+}
+
+func GenTestUnixAddrPtrSlice() []*UnixAddr {
+ orig := make([]*UnixAddr, 5)
+ orig[0] = NewUnixAddr()
+ orig[1] = GenTestUnixAddr()
+ orig[2] = NewUnixAddr()
+ orig[3] = GenTestUnixAddr()
+ orig[4] = NewUnixAddr()
+ return orig
+}
+
+func GenTestUnixAddrSlice() []UnixAddr {
+ orig := make([]UnixAddr, 5)
+ orig[1] = *GenTestUnixAddr()
+ orig[3] = *GenTestUnixAddr()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go
similarity index 50%
rename from vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetype.go
rename to vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go
index b0b79601d..7742846f2 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetype.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go
@@ -10,27 +10,32 @@ import (
"fmt"
"sync"
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
+// ValueType describes the type and units of a value.
+type ValueType struct {
+ TypeStrindex int32
+ UnitStrindex int32
+}
+
var (
protoPoolValueType = sync.Pool{
New: func() any {
- return &otlpprofiles.ValueType{}
+ return &ValueType{}
},
}
)
-func NewOrigValueType() *otlpprofiles.ValueType {
+func NewValueType() *ValueType {
if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.ValueType{}
+ return &ValueType{}
}
- return protoPoolValueType.Get().(*otlpprofiles.ValueType)
+ return protoPoolValueType.Get().(*ValueType)
}
-func DeleteOrigValueType(orig *otlpprofiles.ValueType, nullable bool) {
+func DeleteValueType(orig *ValueType, nullable bool) {
if orig == nil {
return
}
@@ -46,26 +51,80 @@ func DeleteOrigValueType(orig *otlpprofiles.ValueType, nullable bool) {
}
}
-func CopyOrigValueType(dest, src *otlpprofiles.ValueType) {
+func CopyValueType(dest, src *ValueType) *ValueType {
// If copying to same object, just return.
if src == dest {
- return
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewValueType()
}
dest.TypeStrindex = src.TypeStrindex
+
dest.UnitStrindex = src.UnitStrindex
- dest.AggregationTemporality = src.AggregationTemporality
+
+ return dest
}
-func GenTestOrigValueType() *otlpprofiles.ValueType {
- orig := NewOrigValueType()
- orig.TypeStrindex = int32(13)
- orig.UnitStrindex = int32(13)
- orig.AggregationTemporality = otlpprofiles.AggregationTemporality(1)
- return orig
+func CopyValueTypeSlice(dest, src []ValueType) []ValueType {
+ var newDest []ValueType
+ if cap(dest) < len(src) {
+ newDest = make([]ValueType, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteValueType(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyValueType(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyValueTypePtrSlice(dest, src []*ValueType) []*ValueType {
+ var newDest []*ValueType
+ if cap(dest) < len(src) {
+ newDest = make([]*ValueType, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewValueType()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteValueType(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewValueType()
+ }
+ }
+ for i := range src {
+ CopyValueType(newDest[i], src[i])
+ }
+ return newDest
}
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigValueType(orig *otlpprofiles.ValueType, dest *json.Stream) {
+func (orig *ValueType) Reset() {
+ *orig = ValueType{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ValueType) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.TypeStrindex != int32(0) {
dest.WriteObjectField("typeStrindex")
@@ -75,31 +134,24 @@ func MarshalJSONOrigValueType(orig *otlpprofiles.ValueType, dest *json.Stream) {
dest.WriteObjectField("unitStrindex")
dest.WriteInt32(orig.UnitStrindex)
}
-
- if int32(orig.AggregationTemporality) != 0 {
- dest.WriteObjectField("aggregationTemporality")
- dest.WriteInt32(int32(orig.AggregationTemporality))
- }
dest.WriteObjectEnd()
}
-// UnmarshalJSONOrigValueType unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigValueType(orig *otlpprofiles.ValueType, iter *json.Iterator) {
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ValueType) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "typeStrindex", "type_strindex":
orig.TypeStrindex = iter.ReadInt32()
case "unitStrindex", "unit_strindex":
orig.UnitStrindex = iter.ReadInt32()
- case "aggregationTemporality", "aggregation_temporality":
- orig.AggregationTemporality = otlpprofiles.AggregationTemporality(iter.ReadEnumValue(otlpprofiles.AggregationTemporality_value))
default:
iter.Skip()
}
}
}
-func SizeProtoOrigValueType(orig *otlpprofiles.ValueType) int {
+func (orig *ValueType) SizeProto() int {
var n int
var l int
_ = l
@@ -109,13 +161,10 @@ func SizeProtoOrigValueType(orig *otlpprofiles.ValueType) int {
if orig.UnitStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.UnitStrindex))
}
- if orig.AggregationTemporality != 0 {
- n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
- }
return n
}
-func MarshalProtoOrigValueType(orig *otlpprofiles.ValueType, buf []byte) int {
+func (orig *ValueType) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
@@ -129,15 +178,10 @@ func MarshalProtoOrigValueType(orig *otlpprofiles.ValueType, buf []byte) int {
pos--
buf[pos] = 0x10
}
- if orig.AggregationTemporality != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
- pos--
- buf[pos] = 0x18
- }
return len(buf) - pos
}
-func UnmarshalProtoOrigValueType(orig *otlpprofiles.ValueType, buf []byte) error {
+func (orig *ValueType) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
@@ -175,18 +219,6 @@ func UnmarshalProtoOrigValueType(orig *otlpprofiles.ValueType, buf []byte) error
}
orig.UnitStrindex = int32(num)
-
- case 3:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.AggregationTemporality = otlpprofiles.AggregationTemporality(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
@@ -196,3 +228,27 @@ func UnmarshalProtoOrigValueType(orig *otlpprofiles.ValueType, buf []byte) error
}
return nil
}
+
+func GenTestValueType() *ValueType {
+ orig := NewValueType()
+ orig.TypeStrindex = int32(13)
+ orig.UnitStrindex = int32(13)
+ return orig
+}
+
+func GenTestValueTypePtrSlice() []*ValueType {
+ orig := make([]*ValueType, 5)
+ orig[0] = NewValueType()
+ orig[1] = GenTestValueType()
+ orig[2] = NewValueType()
+ orig[3] = GenTestValueType()
+ orig[4] = NewValueType()
+ return orig
+}
+
+func GenTestValueTypeSlice() []ValueType {
+ orig := make([]ValueType, 5)
+ orig[1] = *GenTestValueType()
+ orig[3] = *GenTestValueType()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalue.go
deleted file mode 100644
index 013192398..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalue.go
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "encoding/binary"
- "fmt"
- "math"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolAnyValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue{}
- },
- }
-
- ProtoPoolAnyValue_StringValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue_StringValue{}
- },
- }
-
- ProtoPoolAnyValue_BoolValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue_BoolValue{}
- },
- }
-
- ProtoPoolAnyValue_IntValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue_IntValue{}
- },
- }
-
- ProtoPoolAnyValue_DoubleValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue_DoubleValue{}
- },
- }
-
- ProtoPoolAnyValue_ArrayValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue_ArrayValue{}
- },
- }
-
- ProtoPoolAnyValue_KvlistValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue_KvlistValue{}
- },
- }
-
- ProtoPoolAnyValue_BytesValue = sync.Pool{
- New: func() any {
- return &otlpcommon.AnyValue_BytesValue{}
- },
- }
-)
-
-func NewOrigAnyValue() *otlpcommon.AnyValue {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue{}
- }
- return protoPoolAnyValue.Get().(*otlpcommon.AnyValue)
-}
-
-func DeleteOrigAnyValue(orig *otlpcommon.AnyValue, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- switch ov := orig.Value.(type) {
- case *otlpcommon.AnyValue_StringValue:
- if UseProtoPooling.IsEnabled() {
- ov.StringValue = ""
- ProtoPoolAnyValue_StringValue.Put(ov)
- }
- case *otlpcommon.AnyValue_BoolValue:
- if UseProtoPooling.IsEnabled() {
- ov.BoolValue = false
- ProtoPoolAnyValue_BoolValue.Put(ov)
- }
- case *otlpcommon.AnyValue_IntValue:
- if UseProtoPooling.IsEnabled() {
- ov.IntValue = int64(0)
- ProtoPoolAnyValue_IntValue.Put(ov)
- }
- case *otlpcommon.AnyValue_DoubleValue:
- if UseProtoPooling.IsEnabled() {
- ov.DoubleValue = float64(0)
- ProtoPoolAnyValue_DoubleValue.Put(ov)
- }
- case *otlpcommon.AnyValue_ArrayValue:
- DeleteOrigArrayValue(ov.ArrayValue, true)
- ov.ArrayValue = nil
- ProtoPoolAnyValue_ArrayValue.Put(ov)
- case *otlpcommon.AnyValue_KvlistValue:
- DeleteOrigKeyValueList(ov.KvlistValue, true)
- ov.KvlistValue = nil
- ProtoPoolAnyValue_KvlistValue.Put(ov)
- case *otlpcommon.AnyValue_BytesValue:
- if UseProtoPooling.IsEnabled() {
- ov.BytesValue = nil
- ProtoPoolAnyValue_BytesValue.Put(ov)
- }
-
- }
-
- orig.Reset()
- if nullable {
- protoPoolAnyValue.Put(orig)
- }
-}
-
-func CopyOrigAnyValue(dest, src *otlpcommon.AnyValue) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- switch t := src.Value.(type) {
- case *otlpcommon.AnyValue_StringValue:
- var ov *otlpcommon.AnyValue_StringValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_StringValue{}
- } else {
- ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
- }
- ov.StringValue = t.StringValue
- dest.Value = ov
- case *otlpcommon.AnyValue_BoolValue:
- var ov *otlpcommon.AnyValue_BoolValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_BoolValue{}
- } else {
- ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
- }
- ov.BoolValue = t.BoolValue
- dest.Value = ov
- case *otlpcommon.AnyValue_IntValue:
- var ov *otlpcommon.AnyValue_IntValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_IntValue{}
- } else {
- ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
- }
- ov.IntValue = t.IntValue
- dest.Value = ov
- case *otlpcommon.AnyValue_DoubleValue:
- var ov *otlpcommon.AnyValue_DoubleValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_DoubleValue{}
- } else {
- ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
- }
- ov.DoubleValue = t.DoubleValue
- dest.Value = ov
- case *otlpcommon.AnyValue_ArrayValue:
- var ov *otlpcommon.AnyValue_ArrayValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_ArrayValue{}
- } else {
- ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
- }
- ov.ArrayValue = NewOrigArrayValue()
- CopyOrigArrayValue(ov.ArrayValue, t.ArrayValue)
- dest.Value = ov
- case *otlpcommon.AnyValue_KvlistValue:
- var ov *otlpcommon.AnyValue_KvlistValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_KvlistValue{}
- } else {
- ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
- }
- ov.KvlistValue = NewOrigKeyValueList()
- CopyOrigKeyValueList(ov.KvlistValue, t.KvlistValue)
- dest.Value = ov
- case *otlpcommon.AnyValue_BytesValue:
- var ov *otlpcommon.AnyValue_BytesValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_BytesValue{}
- } else {
- ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
- }
- ov.BytesValue = t.BytesValue
- dest.Value = ov
- }
-}
-
-func GenTestOrigAnyValue() *otlpcommon.AnyValue {
- orig := NewOrigAnyValue()
- orig.Value = &otlpcommon.AnyValue_BoolValue{BoolValue: true}
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigAnyValue(orig *otlpcommon.AnyValue, dest *json.Stream) {
- dest.WriteObjectStart()
- switch orig := orig.Value.(type) {
- case *otlpcommon.AnyValue_StringValue:
- dest.WriteObjectField("stringValue")
- dest.WriteString(orig.StringValue)
- case *otlpcommon.AnyValue_BoolValue:
- dest.WriteObjectField("boolValue")
- dest.WriteBool(orig.BoolValue)
- case *otlpcommon.AnyValue_IntValue:
- dest.WriteObjectField("intValue")
- dest.WriteInt64(orig.IntValue)
- case *otlpcommon.AnyValue_DoubleValue:
- dest.WriteObjectField("doubleValue")
- dest.WriteFloat64(orig.DoubleValue)
- case *otlpcommon.AnyValue_ArrayValue:
- if orig.ArrayValue != nil {
- dest.WriteObjectField("arrayValue")
- MarshalJSONOrigArrayValue(orig.ArrayValue, dest)
- }
- case *otlpcommon.AnyValue_KvlistValue:
- if orig.KvlistValue != nil {
- dest.WriteObjectField("kvlistValue")
- MarshalJSONOrigKeyValueList(orig.KvlistValue, dest)
- }
- case *otlpcommon.AnyValue_BytesValue:
-
- dest.WriteObjectField("bytesValue")
- dest.WriteBytes(orig.BytesValue)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigValue unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigAnyValue(orig *otlpcommon.AnyValue, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
-
- case "stringValue", "string_value":
- {
- var ov *otlpcommon.AnyValue_StringValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_StringValue{}
- } else {
- ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
- }
- ov.StringValue = iter.ReadString()
- orig.Value = ov
- }
-
- case "boolValue", "bool_value":
- {
- var ov *otlpcommon.AnyValue_BoolValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_BoolValue{}
- } else {
- ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
- }
- ov.BoolValue = iter.ReadBool()
- orig.Value = ov
- }
-
- case "intValue", "int_value":
- {
- var ov *otlpcommon.AnyValue_IntValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_IntValue{}
- } else {
- ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
- }
- ov.IntValue = iter.ReadInt64()
- orig.Value = ov
- }
-
- case "doubleValue", "double_value":
- {
- var ov *otlpcommon.AnyValue_DoubleValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_DoubleValue{}
- } else {
- ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
- }
- ov.DoubleValue = iter.ReadFloat64()
- orig.Value = ov
- }
-
- case "arrayValue", "array_value":
- {
- var ov *otlpcommon.AnyValue_ArrayValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_ArrayValue{}
- } else {
- ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
- }
- ov.ArrayValue = NewOrigArrayValue()
- UnmarshalJSONOrigArrayValue(ov.ArrayValue, iter)
- orig.Value = ov
- }
-
- case "kvlistValue", "kvlist_value":
- {
- var ov *otlpcommon.AnyValue_KvlistValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_KvlistValue{}
- } else {
- ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
- }
- ov.KvlistValue = NewOrigKeyValueList()
- UnmarshalJSONOrigKeyValueList(ov.KvlistValue, iter)
- orig.Value = ov
- }
-
- case "bytesValue", "bytes_value":
- {
- var ov *otlpcommon.AnyValue_BytesValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_BytesValue{}
- } else {
- ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
- }
- ov.BytesValue = iter.ReadBytes()
- orig.Value = ov
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigAnyValue(orig *otlpcommon.AnyValue) int {
- var n int
- var l int
- _ = l
- switch orig := orig.Value.(type) {
- case nil:
- _ = orig
- break
- case *otlpcommon.AnyValue_StringValue:
- l = len(orig.StringValue)
- n += 1 + proto.Sov(uint64(l)) + l
- case *otlpcommon.AnyValue_BoolValue:
- n += 2
- case *otlpcommon.AnyValue_IntValue:
- n += 1 + proto.Sov(uint64(orig.IntValue))
- case *otlpcommon.AnyValue_DoubleValue:
- n += 9
- case *otlpcommon.AnyValue_ArrayValue:
- l = SizeProtoOrigArrayValue(orig.ArrayValue)
- n += 1 + proto.Sov(uint64(l)) + l
- case *otlpcommon.AnyValue_KvlistValue:
- l = SizeProtoOrigKeyValueList(orig.KvlistValue)
- n += 1 + proto.Sov(uint64(l)) + l
- case *otlpcommon.AnyValue_BytesValue:
- l = len(orig.BytesValue)
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigAnyValue(orig *otlpcommon.AnyValue, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- switch orig := orig.Value.(type) {
- case *otlpcommon.AnyValue_StringValue:
- l = len(orig.StringValue)
- pos -= l
- copy(buf[pos:], orig.StringValue)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- case *otlpcommon.AnyValue_BoolValue:
- pos--
- if orig.BoolValue {
- buf[pos] = 1
- } else {
- buf[pos] = 0
- }
- pos--
- buf[pos] = 0x10
-
- case *otlpcommon.AnyValue_IntValue:
- pos = proto.EncodeVarint(buf, pos, uint64(orig.IntValue))
- pos--
- buf[pos] = 0x18
-
- case *otlpcommon.AnyValue_DoubleValue:
- pos -= 8
- binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.DoubleValue))
- pos--
- buf[pos] = 0x21
-
- case *otlpcommon.AnyValue_ArrayValue:
-
- l = MarshalProtoOrigArrayValue(orig.ArrayValue, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x2a
-
- case *otlpcommon.AnyValue_KvlistValue:
-
- l = MarshalProtoOrigKeyValueList(orig.KvlistValue, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x32
-
- case *otlpcommon.AnyValue_BytesValue:
- l = len(orig.BytesValue)
- pos -= l
- copy(buf[pos:], orig.BytesValue)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x3a
-
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigAnyValue(orig *otlpcommon.AnyValue, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpcommon.AnyValue_StringValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_StringValue{}
- } else {
- ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
- }
- ov.StringValue = string(buf[startPos:pos])
- orig.Value = ov
-
- case 2:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
- var ov *otlpcommon.AnyValue_BoolValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_BoolValue{}
- } else {
- ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
- }
- ov.BoolValue = num != 0
- orig.Value = ov
-
- case 3:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
- var ov *otlpcommon.AnyValue_IntValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_IntValue{}
- } else {
- ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
- }
- ov.IntValue = int64(num)
- orig.Value = ov
-
- case 4:
- if wireType != proto.WireTypeI64 {
- return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeI64(buf, pos)
- if err != nil {
- return err
- }
- var ov *otlpcommon.AnyValue_DoubleValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_DoubleValue{}
- } else {
- ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
- }
- ov.DoubleValue = math.Float64frombits(num)
- orig.Value = ov
-
- case 5:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpcommon.AnyValue_ArrayValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_ArrayValue{}
- } else {
- ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
- }
- ov.ArrayValue = NewOrigArrayValue()
- err = UnmarshalProtoOrigArrayValue(ov.ArrayValue, buf[startPos:pos])
- if err != nil {
- return err
- }
- orig.Value = ov
-
- case 6:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpcommon.AnyValue_KvlistValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_KvlistValue{}
- } else {
- ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
- }
- ov.KvlistValue = NewOrigKeyValueList()
- err = UnmarshalProtoOrigKeyValueList(ov.KvlistValue, buf[startPos:pos])
- if err != nil {
- return err
- }
- orig.Value = ov
-
- case 7:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpcommon.AnyValue_BytesValue
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpcommon.AnyValue_BytesValue{}
- } else {
- ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
- }
- if length != 0 {
- ov.BytesValue = make([]byte, length)
- copy(ov.BytesValue, buf[startPos:pos])
- }
- orig.Value = ov
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go
index 7381e5f77..2fd1f1132 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go
@@ -6,53 +6,24 @@
package internal
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type Slice struct {
- orig *[]otlpcommon.AnyValue
+type SliceWrapper struct {
+ orig *[]AnyValue
state *State
}
-func GetOrigSlice(ms Slice) *[]otlpcommon.AnyValue {
+func GetSliceOrig(ms SliceWrapper) *[]AnyValue {
return ms.orig
}
-func GetSliceState(ms Slice) *State {
+func GetSliceState(ms SliceWrapper) *State {
return ms.state
}
-func NewSlice(orig *[]otlpcommon.AnyValue, state *State) Slice {
- return Slice{orig: orig, state: state}
-}
-
-func GenerateTestSlice() Slice {
- orig := GenerateOrigTestAnyValueSlice()
- return NewSlice(&orig, NewState())
-}
-
-func CopyOrigAnyValueSlice(dest, src []otlpcommon.AnyValue) []otlpcommon.AnyValue {
- var newDest []otlpcommon.AnyValue
- if cap(dest) < len(src) {
- newDest = make([]otlpcommon.AnyValue, len(src))
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigAnyValue(&dest[i], false)
- }
- }
- for i := range src {
- CopyOrigAnyValue(&newDest[i], &src[i])
- }
- return newDest
+func NewSliceWrapper(orig *[]AnyValue, state *State) SliceWrapper {
+ return SliceWrapper{orig: orig, state: state}
}
-func GenerateOrigTestAnyValueSlice() []otlpcommon.AnyValue {
- orig := make([]otlpcommon.AnyValue, 5)
- orig[1] = *GenTestOrigAnyValue()
- orig[3] = *GenTestOrigAnyValue()
- return orig
+func GenTestSliceWrapper() SliceWrapper {
+ orig := GenTestAnyValueSlice()
+ return NewSliceWrapper(&orig, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_arrayvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_arrayvalue.go
deleted file mode 100644
index c168fe718..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_arrayvalue.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolArrayValue = sync.Pool{
- New: func() any {
- return &otlpcommon.ArrayValue{}
- },
- }
-)
-
-func NewOrigArrayValue() *otlpcommon.ArrayValue {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.ArrayValue{}
- }
- return protoPoolArrayValue.Get().(*otlpcommon.ArrayValue)
-}
-
-func DeleteOrigArrayValue(orig *otlpcommon.ArrayValue, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.Values {
- DeleteOrigAnyValue(&orig.Values[i], false)
- }
-
- orig.Reset()
- if nullable {
- protoPoolArrayValue.Put(orig)
- }
-}
-
-func CopyOrigArrayValue(dest, src *otlpcommon.ArrayValue) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Values = CopyOrigAnyValueSlice(dest.Values, src.Values)
-}
-
-func GenTestOrigArrayValue() *otlpcommon.ArrayValue {
- orig := NewOrigArrayValue()
- orig.Values = GenerateOrigTestAnyValueSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigArrayValue(orig *otlpcommon.ArrayValue, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.Values) > 0 {
- dest.WriteObjectField("values")
- dest.WriteArrayStart()
- MarshalJSONOrigAnyValue(&orig.Values[0], dest)
- for i := 1; i < len(orig.Values); i++ {
- dest.WriteMore()
- MarshalJSONOrigAnyValue(&orig.Values[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigArrayValue unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigArrayValue(orig *otlpcommon.ArrayValue, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "values":
- for iter.ReadArray() {
- orig.Values = append(orig.Values, otlpcommon.AnyValue{})
- UnmarshalJSONOrigAnyValue(&orig.Values[len(orig.Values)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigArrayValue(orig *otlpcommon.ArrayValue) int {
- var n int
- var l int
- _ = l
- for i := range orig.Values {
- l = SizeProtoOrigAnyValue(&orig.Values[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigArrayValue(orig *otlpcommon.ArrayValue, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.Values) - 1; i >= 0; i-- {
- l = MarshalProtoOrigAnyValue(&orig.Values[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigArrayValue(orig *otlpcommon.ArrayValue, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Values = append(orig.Values, otlpcommon.AnyValue{})
- err = UnmarshalProtoOrigAnyValue(&orig.Values[len(orig.Values)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_attributeunit.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_attributeunit.go
deleted file mode 100644
index a3d003443..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_attributeunit.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolAttributeUnit = sync.Pool{
- New: func() any {
- return &otlpprofiles.AttributeUnit{}
- },
- }
-)
-
-func NewOrigAttributeUnit() *otlpprofiles.AttributeUnit {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.AttributeUnit{}
- }
- return protoPoolAttributeUnit.Get().(*otlpprofiles.AttributeUnit)
-}
-
-func DeleteOrigAttributeUnit(orig *otlpprofiles.AttributeUnit, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolAttributeUnit.Put(orig)
- }
-}
-
-func CopyOrigAttributeUnit(dest, src *otlpprofiles.AttributeUnit) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.AttributeKeyStrindex = src.AttributeKeyStrindex
- dest.UnitStrindex = src.UnitStrindex
-}
-
-func GenTestOrigAttributeUnit() *otlpprofiles.AttributeUnit {
- orig := NewOrigAttributeUnit()
- orig.AttributeKeyStrindex = int32(13)
- orig.UnitStrindex = int32(13)
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigAttributeUnit(orig *otlpprofiles.AttributeUnit, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.AttributeKeyStrindex != int32(0) {
- dest.WriteObjectField("attributeKeyStrindex")
- dest.WriteInt32(orig.AttributeKeyStrindex)
- }
- if orig.UnitStrindex != int32(0) {
- dest.WriteObjectField("unitStrindex")
- dest.WriteInt32(orig.UnitStrindex)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigAttributeUnit unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigAttributeUnit(orig *otlpprofiles.AttributeUnit, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "attributeKeyStrindex", "attribute_key_strindex":
- orig.AttributeKeyStrindex = iter.ReadInt32()
- case "unitStrindex", "unit_strindex":
- orig.UnitStrindex = iter.ReadInt32()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigAttributeUnit(orig *otlpprofiles.AttributeUnit) int {
- var n int
- var l int
- _ = l
- if orig.AttributeKeyStrindex != 0 {
- n += 1 + proto.Sov(uint64(orig.AttributeKeyStrindex))
- }
- if orig.UnitStrindex != 0 {
- n += 1 + proto.Sov(uint64(orig.UnitStrindex))
- }
- return n
-}
-
-func MarshalProtoOrigAttributeUnit(orig *otlpprofiles.AttributeUnit, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.AttributeKeyStrindex != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeKeyStrindex))
- pos--
- buf[pos] = 0x8
- }
- if orig.UnitStrindex != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
- pos--
- buf[pos] = 0x10
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigAttributeUnit(orig *otlpprofiles.AttributeUnit, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeKeyStrindex", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.AttributeKeyStrindex = int32(num)
-
- case 2:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.UnitStrindex = int32(num)
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_attributeunitslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_attributeunitslice.go
deleted file mode 100644
index b6a4af2b7..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_attributeunitslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigAttributeUnitSlice(dest, src []*otlpprofiles.AttributeUnit) []*otlpprofiles.AttributeUnit {
- var newDest []*otlpprofiles.AttributeUnit
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.AttributeUnit, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigAttributeUnit()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigAttributeUnit(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigAttributeUnit()
- }
- }
- for i := range src {
- CopyOrigAttributeUnit(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestAttributeUnitSlice() []*otlpprofiles.AttributeUnit {
- orig := make([]*otlpprofiles.AttributeUnit, 5)
- orig[0] = NewOrigAttributeUnit()
- orig[1] = GenTestOrigAttributeUnit()
- orig[2] = NewOrigAttributeUnit()
- orig[3] = GenTestOrigAttributeUnit()
- orig[4] = NewOrigAttributeUnit()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go
index fe7e4128a..ecdbd89ce 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go
@@ -6,32 +6,28 @@
package internal
-type ByteSlice struct {
+type ByteSliceWrapper struct {
orig *[]byte
state *State
}
-func GetOrigByteSlice(ms ByteSlice) *[]byte {
+func GetByteSliceOrig(ms ByteSliceWrapper) *[]byte {
return ms.orig
}
-func GetByteSliceState(ms ByteSlice) *State {
+func GetByteSliceState(ms ByteSliceWrapper) *State {
return ms.state
}
-func NewByteSlice(orig *[]byte, state *State) ByteSlice {
- return ByteSlice{orig: orig, state: state}
+func NewByteSliceWrapper(orig *[]byte, state *State) ByteSliceWrapper {
+ return ByteSliceWrapper{orig: orig, state: state}
}
-func GenerateTestByteSlice() ByteSlice {
- orig := GenerateOrigTestByteSlice()
- return NewByteSlice(&orig, NewState())
+func GenTestByteSliceWrapper() ByteSliceWrapper {
+ orig := []byte{1, 2, 3}
+ return NewByteSliceWrapper(&orig, NewState())
}
-func CopyOrigByteSlice(dst, src []byte) []byte {
- return append(dst[:0], src...)
-}
-
-func GenerateOrigTestByteSlice() []byte {
+func GenTestByteSlice() []byte {
return []byte{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go
index 36b694815..b4e4ea12e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go
@@ -6,271 +6,23 @@
package internal
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type EntityRef struct {
- orig *otlpcommon.EntityRef
+type EntityRefWrapper struct {
+ orig *EntityRef
state *State
}
-func GetOrigEntityRef(ms EntityRef) *otlpcommon.EntityRef {
+func GetEntityRefOrig(ms EntityRefWrapper) *EntityRef {
return ms.orig
}
-func GetEntityRefState(ms EntityRef) *State {
+func GetEntityRefState(ms EntityRefWrapper) *State {
return ms.state
}
-func NewEntityRef(orig *otlpcommon.EntityRef, state *State) EntityRef {
- return EntityRef{orig: orig, state: state}
-}
-
-var (
- protoPoolEntityRef = sync.Pool{
- New: func() any {
- return &otlpcommon.EntityRef{}
- },
- }
-)
-
-func NewOrigEntityRef() *otlpcommon.EntityRef {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.EntityRef{}
- }
- return protoPoolEntityRef.Get().(*otlpcommon.EntityRef)
-}
-
-func DeleteOrigEntityRef(orig *otlpcommon.EntityRef, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolEntityRef.Put(orig)
- }
-}
-
-func CopyOrigEntityRef(dest, src *otlpcommon.EntityRef) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.SchemaUrl = src.SchemaUrl
- dest.Type = src.Type
- dest.IdKeys = CopyOrigStringSlice(dest.IdKeys, src.IdKeys)
- dest.DescriptionKeys = CopyOrigStringSlice(dest.DescriptionKeys, src.DescriptionKeys)
-}
-
-func GenTestOrigEntityRef() *otlpcommon.EntityRef {
- orig := NewOrigEntityRef()
- orig.SchemaUrl = "test_schemaurl"
- orig.Type = "test_type"
- orig.IdKeys = GenerateOrigTestStringSlice()
- orig.DescriptionKeys = GenerateOrigTestStringSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigEntityRef(orig *otlpcommon.EntityRef, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- if orig.Type != "" {
- dest.WriteObjectField("type")
- dest.WriteString(orig.Type)
- }
- if len(orig.IdKeys) > 0 {
- dest.WriteObjectField("idKeys")
- dest.WriteArrayStart()
- dest.WriteString(orig.IdKeys[0])
- for i := 1; i < len(orig.IdKeys); i++ {
- dest.WriteMore()
- dest.WriteString(orig.IdKeys[i])
- }
- dest.WriteArrayEnd()
- }
- if len(orig.DescriptionKeys) > 0 {
- dest.WriteObjectField("descriptionKeys")
- dest.WriteArrayStart()
- dest.WriteString(orig.DescriptionKeys[0])
- for i := 1; i < len(orig.DescriptionKeys); i++ {
- dest.WriteMore()
- dest.WriteString(orig.DescriptionKeys[i])
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigEntityRef unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigEntityRef(orig *otlpcommon.EntityRef, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- case "type":
- orig.Type = iter.ReadString()
- case "idKeys", "id_keys":
- for iter.ReadArray() {
- orig.IdKeys = append(orig.IdKeys, iter.ReadString())
- }
-
- case "descriptionKeys", "description_keys":
- for iter.ReadArray() {
- orig.DescriptionKeys = append(orig.DescriptionKeys, iter.ReadString())
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigEntityRef(orig *otlpcommon.EntityRef) int {
- var n int
- var l int
- _ = l
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.Type)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for _, s := range orig.IdKeys {
- l = len(s)
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for _, s := range orig.DescriptionKeys {
- l = len(s)
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
+func NewEntityRefWrapper(orig *EntityRef, state *State) EntityRefWrapper {
+ return EntityRefWrapper{orig: orig, state: state}
}
-func MarshalProtoOrigEntityRef(orig *otlpcommon.EntityRef, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- l = len(orig.Type)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Type)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- for i := len(orig.IdKeys) - 1; i >= 0; i-- {
- l = len(orig.IdKeys[i])
- pos -= l
- copy(buf[pos:], orig.IdKeys[i])
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- for i := len(orig.DescriptionKeys) - 1; i >= 0; i-- {
- l = len(orig.DescriptionKeys[i])
- pos -= l
- copy(buf[pos:], orig.DescriptionKeys[i])
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x22
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigEntityRef(orig *otlpcommon.EntityRef, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Type = string(buf[startPos:pos])
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.IdKeys = append(orig.IdKeys, string(buf[startPos:pos]))
-
- case 4:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.DescriptionKeys = append(orig.DescriptionKeys, string(buf[startPos:pos]))
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
+func GenTestEntityRefWrapper() EntityRefWrapper {
+ return NewEntityRefWrapper(GenTestEntityRef(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go
index ecd851ff6..8e71ec1de 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go
@@ -6,68 +6,24 @@
package internal
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type EntityRefSlice struct {
- orig *[]*otlpcommon.EntityRef
+type EntityRefSliceWrapper struct {
+ orig *[]*EntityRef
state *State
}
-func GetOrigEntityRefSlice(ms EntityRefSlice) *[]*otlpcommon.EntityRef {
+func GetEntityRefSliceOrig(ms EntityRefSliceWrapper) *[]*EntityRef {
return ms.orig
}
-func GetEntityRefSliceState(ms EntityRefSlice) *State {
+func GetEntityRefSliceState(ms EntityRefSliceWrapper) *State {
return ms.state
}
-func NewEntityRefSlice(orig *[]*otlpcommon.EntityRef, state *State) EntityRefSlice {
- return EntityRefSlice{orig: orig, state: state}
-}
-
-func GenerateTestEntityRefSlice() EntityRefSlice {
- orig := GenerateOrigTestEntityRefSlice()
- return NewEntityRefSlice(&orig, NewState())
-}
-
-func CopyOrigEntityRefSlice(dest, src []*otlpcommon.EntityRef) []*otlpcommon.EntityRef {
- var newDest []*otlpcommon.EntityRef
- if cap(dest) < len(src) {
- newDest = make([]*otlpcommon.EntityRef, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigEntityRef()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigEntityRef(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigEntityRef()
- }
- }
- for i := range src {
- CopyOrigEntityRef(newDest[i], src[i])
- }
- return newDest
+func NewEntityRefSliceWrapper(orig *[]*EntityRef, state *State) EntityRefSliceWrapper {
+ return EntityRefSliceWrapper{orig: orig, state: state}
}
-func GenerateOrigTestEntityRefSlice() []*otlpcommon.EntityRef {
- orig := make([]*otlpcommon.EntityRef, 5)
- orig[0] = NewOrigEntityRef()
- orig[1] = GenTestOrigEntityRef()
- orig[2] = NewOrigEntityRef()
- orig[3] = GenTestOrigEntityRef()
- orig[4] = NewOrigEntityRef()
- return orig
+func GenTestEntityRefSliceWrapper() EntityRefSliceWrapper {
+ orig := GenTestEntityRefPtrSlice()
+ return NewEntityRefSliceWrapper(&orig, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplarslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplarslice.go
deleted file mode 100644
index ab91c7c59..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exemplarslice.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigExemplarSlice(dest, src []otlpmetrics.Exemplar) []otlpmetrics.Exemplar {
- var newDest []otlpmetrics.Exemplar
- if cap(dest) < len(src) {
- newDest = make([]otlpmetrics.Exemplar, len(src))
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigExemplar(&dest[i], false)
- }
- }
- for i := range src {
- CopyOrigExemplar(&newDest[i], &src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestExemplarSlice() []otlpmetrics.Exemplar {
- orig := make([]otlpmetrics.Exemplar, 5)
- orig[1] = *GenTestOrigExemplar()
- orig[3] = *GenTestOrigExemplar()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogram.go
deleted file mode 100644
index 1f4838d74..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogram.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExponentialHistogram = sync.Pool{
- New: func() any {
- return &otlpmetrics.ExponentialHistogram{}
- },
- }
-)
-
-func NewOrigExponentialHistogram() *otlpmetrics.ExponentialHistogram {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.ExponentialHistogram{}
- }
- return protoPoolExponentialHistogram.Get().(*otlpmetrics.ExponentialHistogram)
-}
-
-func DeleteOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.DataPoints {
- DeleteOrigExponentialHistogramDataPoint(orig.DataPoints[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolExponentialHistogram.Put(orig)
- }
-}
-
-func CopyOrigExponentialHistogram(dest, src *otlpmetrics.ExponentialHistogram) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.DataPoints = CopyOrigExponentialHistogramDataPointSlice(dest.DataPoints, src.DataPoints)
- dest.AggregationTemporality = src.AggregationTemporality
-}
-
-func GenTestOrigExponentialHistogram() *otlpmetrics.ExponentialHistogram {
- orig := NewOrigExponentialHistogram()
- orig.DataPoints = GenerateOrigTestExponentialHistogramDataPointSlice()
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1)
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.DataPoints) > 0 {
- dest.WriteObjectField("dataPoints")
- dest.WriteArrayStart()
- MarshalJSONOrigExponentialHistogramDataPoint(orig.DataPoints[0], dest)
- for i := 1; i < len(orig.DataPoints); i++ {
- dest.WriteMore()
- MarshalJSONOrigExponentialHistogramDataPoint(orig.DataPoints[i], dest)
- }
- dest.WriteArrayEnd()
- }
-
- if int32(orig.AggregationTemporality) != 0 {
- dest.WriteObjectField("aggregationTemporality")
- dest.WriteInt32(int32(orig.AggregationTemporality))
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExponentialHistogram unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "dataPoints", "data_points":
- for iter.ReadArray() {
- orig.DataPoints = append(orig.DataPoints, NewOrigExponentialHistogramDataPoint())
- UnmarshalJSONOrigExponentialHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
- }
-
- case "aggregationTemporality", "aggregation_temporality":
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value))
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram) int {
- var n int
- var l int
- _ = l
- for i := range orig.DataPoints {
- l = SizeProtoOrigExponentialHistogramDataPoint(orig.DataPoints[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.AggregationTemporality != 0 {
- n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
- }
- return n
-}
-
-func MarshalProtoOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.DataPoints) - 1; i >= 0; i-- {
- l = MarshalProtoOrigExponentialHistogramDataPoint(orig.DataPoints[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- if orig.AggregationTemporality != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
- pos--
- buf[pos] = 0x10
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.DataPoints = append(orig.DataPoints, NewOrigExponentialHistogramDataPoint())
- err = UnmarshalProtoOrigExponentialHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num)
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint_buckets.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint_buckets.go
deleted file mode 100644
index f7f14878b..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapoint_buckets.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExponentialHistogramDataPoint_Buckets = sync.Pool{
- New: func() any {
- return &otlpmetrics.ExponentialHistogramDataPoint_Buckets{}
- },
- }
-)
-
-func NewOrigExponentialHistogramDataPoint_Buckets() *otlpmetrics.ExponentialHistogramDataPoint_Buckets {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.ExponentialHistogramDataPoint_Buckets{}
- }
- return protoPoolExponentialHistogramDataPoint_Buckets.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Buckets)
-}
-
-func DeleteOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolExponentialHistogramDataPoint_Buckets.Put(orig)
- }
-}
-
-func CopyOrigExponentialHistogramDataPoint_Buckets(dest, src *otlpmetrics.ExponentialHistogramDataPoint_Buckets) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Offset = src.Offset
- dest.BucketCounts = CopyOrigUint64Slice(dest.BucketCounts, src.BucketCounts)
-}
-
-func GenTestOrigExponentialHistogramDataPoint_Buckets() *otlpmetrics.ExponentialHistogramDataPoint_Buckets {
- orig := NewOrigExponentialHistogramDataPoint_Buckets()
- orig.Offset = int32(13)
- orig.BucketCounts = GenerateOrigTestUint64Slice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.Offset != int32(0) {
- dest.WriteObjectField("offset")
- dest.WriteInt32(orig.Offset)
- }
- if len(orig.BucketCounts) > 0 {
- dest.WriteObjectField("bucketCounts")
- dest.WriteArrayStart()
- dest.WriteUint64(orig.BucketCounts[0])
- for i := 1; i < len(orig.BucketCounts); i++ {
- dest.WriteMore()
- dest.WriteUint64(orig.BucketCounts[i])
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExponentialHistogramDataPointBuckets unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "offset":
- orig.Offset = iter.ReadInt32()
- case "bucketCounts", "bucket_counts":
- for iter.ReadArray() {
- orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets) int {
- var n int
- var l int
- _ = l
- if orig.Offset != 0 {
- n += 1 + proto.Soz(uint64(orig.Offset))
- }
- if len(orig.BucketCounts) > 0 {
- l = 0
- for _, e := range orig.BucketCounts {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.Offset != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Offset)<<1)^uint32(orig.Offset>>31)))
- pos--
- buf[pos] = 0x8
- }
- l = len(orig.BucketCounts)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.BucketCounts[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x12
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.Offset = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.BucketCounts = append(orig.BucketCounts, uint64(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapointslice.go
deleted file mode 100644
index 021c0d0a2..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exponentialhistogramdatapointslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigExponentialHistogramDataPointSlice(dest, src []*otlpmetrics.ExponentialHistogramDataPoint) []*otlpmetrics.ExponentialHistogramDataPoint {
- var newDest []*otlpmetrics.ExponentialHistogramDataPoint
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.ExponentialHistogramDataPoint, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigExponentialHistogramDataPoint()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigExponentialHistogramDataPoint(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigExponentialHistogramDataPoint()
- }
- }
- for i := range src {
- CopyOrigExponentialHistogramDataPoint(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestExponentialHistogramDataPointSlice() []*otlpmetrics.ExponentialHistogramDataPoint {
- orig := make([]*otlpmetrics.ExponentialHistogramDataPoint, 5)
- orig[0] = NewOrigExponentialHistogramDataPoint()
- orig[1] = GenTestOrigExponentialHistogramDataPoint()
- orig[2] = NewOrigExponentialHistogramDataPoint()
- orig[3] = GenTestOrigExponentialHistogramDataPoint()
- orig[4] = NewOrigExponentialHistogramDataPoint()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogspartialsuccess.go
deleted file mode 100644
index 4a55de6de..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogspartialsuccess.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportLogsPartialSuccess = sync.Pool{
- New: func() any {
- return &otlpcollectorlogs.ExportLogsPartialSuccess{}
- },
- }
-)
-
-func NewOrigExportLogsPartialSuccess() *otlpcollectorlogs.ExportLogsPartialSuccess {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectorlogs.ExportLogsPartialSuccess{}
- }
- return protoPoolExportLogsPartialSuccess.Get().(*otlpcollectorlogs.ExportLogsPartialSuccess)
-}
-
-func DeleteOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolExportLogsPartialSuccess.Put(orig)
- }
-}
-
-func CopyOrigExportLogsPartialSuccess(dest, src *otlpcollectorlogs.ExportLogsPartialSuccess) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.RejectedLogRecords = src.RejectedLogRecords
- dest.ErrorMessage = src.ErrorMessage
-}
-
-func GenTestOrigExportLogsPartialSuccess() *otlpcollectorlogs.ExportLogsPartialSuccess {
- orig := NewOrigExportLogsPartialSuccess()
- orig.RejectedLogRecords = int64(13)
- orig.ErrorMessage = "test_errormessage"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.RejectedLogRecords != int64(0) {
- dest.WriteObjectField("rejectedLogRecords")
- dest.WriteInt64(orig.RejectedLogRecords)
- }
- if orig.ErrorMessage != "" {
- dest.WriteObjectField("errorMessage")
- dest.WriteString(orig.ErrorMessage)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "rejectedLogRecords", "rejected_log_records":
- orig.RejectedLogRecords = iter.ReadInt64()
- case "errorMessage", "error_message":
- orig.ErrorMessage = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess) int {
- var n int
- var l int
- _ = l
- if orig.RejectedLogRecords != 0 {
- n += 1 + proto.Sov(uint64(orig.RejectedLogRecords))
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.RejectedLogRecords != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedLogRecords))
- pos--
- buf[pos] = 0x8
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.ErrorMessage)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.RejectedLogRecords = int64(num)
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ErrorMessage = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go
index 090e82363..a529cd105 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go
@@ -6,174 +6,23 @@
package internal
-import (
- "fmt"
- "sync"
-
- otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type Logs struct {
- orig *otlpcollectorlogs.ExportLogsServiceRequest
+type LogsWrapper struct {
+ orig *ExportLogsServiceRequest
state *State
}
-func GetOrigLogs(ms Logs) *otlpcollectorlogs.ExportLogsServiceRequest {
+func GetLogsOrig(ms LogsWrapper) *ExportLogsServiceRequest {
return ms.orig
}
-func GetLogsState(ms Logs) *State {
+func GetLogsState(ms LogsWrapper) *State {
return ms.state
}
-func NewLogs(orig *otlpcollectorlogs.ExportLogsServiceRequest, state *State) Logs {
- return Logs{orig: orig, state: state}
+func NewLogsWrapper(orig *ExportLogsServiceRequest, state *State) LogsWrapper {
+ return LogsWrapper{orig: orig, state: state}
}
-var (
- protoPoolExportLogsServiceRequest = sync.Pool{
- New: func() any {
- return &otlpcollectorlogs.ExportLogsServiceRequest{}
- },
- }
-)
-
-func NewOrigExportLogsServiceRequest() *otlpcollectorlogs.ExportLogsServiceRequest {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectorlogs.ExportLogsServiceRequest{}
- }
- return protoPoolExportLogsServiceRequest.Get().(*otlpcollectorlogs.ExportLogsServiceRequest)
-}
-
-func DeleteOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.ResourceLogs {
- DeleteOrigResourceLogs(orig.ResourceLogs[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolExportLogsServiceRequest.Put(orig)
- }
-}
-
-func CopyOrigExportLogsServiceRequest(dest, src *otlpcollectorlogs.ExportLogsServiceRequest) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.ResourceLogs = CopyOrigResourceLogsSlice(dest.ResourceLogs, src.ResourceLogs)
-}
-
-func GenTestOrigExportLogsServiceRequest() *otlpcollectorlogs.ExportLogsServiceRequest {
- orig := NewOrigExportLogsServiceRequest()
- orig.ResourceLogs = GenerateOrigTestResourceLogsSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.ResourceLogs) > 0 {
- dest.WriteObjectField("resourceLogs")
- dest.WriteArrayStart()
- MarshalJSONOrigResourceLogs(orig.ResourceLogs[0], dest)
- for i := 1; i < len(orig.ResourceLogs); i++ {
- dest.WriteMore()
- MarshalJSONOrigResourceLogs(orig.ResourceLogs[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigLogs unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resourceLogs", "resource_logs":
- for iter.ReadArray() {
- orig.ResourceLogs = append(orig.ResourceLogs, NewOrigResourceLogs())
- UnmarshalJSONOrigResourceLogs(orig.ResourceLogs[len(orig.ResourceLogs)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest) int {
- var n int
- var l int
- _ = l
- for i := range orig.ResourceLogs {
- l = SizeProtoOrigResourceLogs(orig.ResourceLogs[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
- l = MarshalProtoOrigResourceLogs(orig.ResourceLogs[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ResourceLogs = append(orig.ResourceLogs, NewOrigResourceLogs())
- err = UnmarshalProtoOrigResourceLogs(orig.ResourceLogs[len(orig.ResourceLogs)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
+func GenTestLogsWrapper() LogsWrapper {
+ return NewLogsWrapper(GenTestExportLogsServiceRequest(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsserviceresponse.go
deleted file mode 100644
index 13bb93a04..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsserviceresponse.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportLogsServiceResponse = sync.Pool{
- New: func() any {
- return &otlpcollectorlogs.ExportLogsServiceResponse{}
- },
- }
-)
-
-func NewOrigExportLogsServiceResponse() *otlpcollectorlogs.ExportLogsServiceResponse {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectorlogs.ExportLogsServiceResponse{}
- }
- return protoPoolExportLogsServiceResponse.Get().(*otlpcollectorlogs.ExportLogsServiceResponse)
-}
-
-func DeleteOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigExportLogsPartialSuccess(&orig.PartialSuccess, false)
-
- orig.Reset()
- if nullable {
- protoPoolExportLogsServiceResponse.Put(orig)
- }
-}
-
-func CopyOrigExportLogsServiceResponse(dest, src *otlpcollectorlogs.ExportLogsServiceResponse) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigExportLogsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
-}
-
-func GenTestOrigExportLogsServiceResponse() *otlpcollectorlogs.ExportLogsServiceResponse {
- orig := NewOrigExportLogsServiceResponse()
- orig.PartialSuccess = *GenTestOrigExportLogsPartialSuccess()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("partialSuccess")
- MarshalJSONOrigExportLogsPartialSuccess(&orig.PartialSuccess, dest)
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "partialSuccess", "partial_success":
- UnmarshalJSONOrigExportLogsPartialSuccess(&orig.PartialSuccess, iter)
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigExportLogsPartialSuccess(&orig.PartialSuccess)
- n += 1 + proto.Sov(uint64(l)) + l
- return n
-}
-
-func MarshalProtoOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigExportLogsPartialSuccess(&orig.PartialSuccess, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigExportLogsPartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricspartialsuccess.go
deleted file mode 100644
index 8bf2baeb2..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricspartialsuccess.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportMetricsPartialSuccess = sync.Pool{
- New: func() any {
- return &otlpcollectormetrics.ExportMetricsPartialSuccess{}
- },
- }
-)
-
-func NewOrigExportMetricsPartialSuccess() *otlpcollectormetrics.ExportMetricsPartialSuccess {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectormetrics.ExportMetricsPartialSuccess{}
- }
- return protoPoolExportMetricsPartialSuccess.Get().(*otlpcollectormetrics.ExportMetricsPartialSuccess)
-}
-
-func DeleteOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolExportMetricsPartialSuccess.Put(orig)
- }
-}
-
-func CopyOrigExportMetricsPartialSuccess(dest, src *otlpcollectormetrics.ExportMetricsPartialSuccess) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.RejectedDataPoints = src.RejectedDataPoints
- dest.ErrorMessage = src.ErrorMessage
-}
-
-func GenTestOrigExportMetricsPartialSuccess() *otlpcollectormetrics.ExportMetricsPartialSuccess {
- orig := NewOrigExportMetricsPartialSuccess()
- orig.RejectedDataPoints = int64(13)
- orig.ErrorMessage = "test_errormessage"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.RejectedDataPoints != int64(0) {
- dest.WriteObjectField("rejectedDataPoints")
- dest.WriteInt64(orig.RejectedDataPoints)
- }
- if orig.ErrorMessage != "" {
- dest.WriteObjectField("errorMessage")
- dest.WriteString(orig.ErrorMessage)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "rejectedDataPoints", "rejected_data_points":
- orig.RejectedDataPoints = iter.ReadInt64()
- case "errorMessage", "error_message":
- orig.ErrorMessage = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess) int {
- var n int
- var l int
- _ = l
- if orig.RejectedDataPoints != 0 {
- n += 1 + proto.Sov(uint64(orig.RejectedDataPoints))
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.RejectedDataPoints != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedDataPoints))
- pos--
- buf[pos] = 0x8
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.ErrorMessage)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.RejectedDataPoints = int64(num)
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ErrorMessage = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go
index b3c06e55e..0d378e4d7 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go
@@ -6,174 +6,23 @@
package internal
-import (
- "fmt"
- "sync"
-
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type Metrics struct {
- orig *otlpcollectormetrics.ExportMetricsServiceRequest
+type MetricsWrapper struct {
+ orig *ExportMetricsServiceRequest
state *State
}
-func GetOrigMetrics(ms Metrics) *otlpcollectormetrics.ExportMetricsServiceRequest {
+func GetMetricsOrig(ms MetricsWrapper) *ExportMetricsServiceRequest {
return ms.orig
}
-func GetMetricsState(ms Metrics) *State {
+func GetMetricsState(ms MetricsWrapper) *State {
return ms.state
}
-func NewMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *State) Metrics {
- return Metrics{orig: orig, state: state}
+func NewMetricsWrapper(orig *ExportMetricsServiceRequest, state *State) MetricsWrapper {
+ return MetricsWrapper{orig: orig, state: state}
}
-var (
- protoPoolExportMetricsServiceRequest = sync.Pool{
- New: func() any {
- return &otlpcollectormetrics.ExportMetricsServiceRequest{}
- },
- }
-)
-
-func NewOrigExportMetricsServiceRequest() *otlpcollectormetrics.ExportMetricsServiceRequest {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectormetrics.ExportMetricsServiceRequest{}
- }
- return protoPoolExportMetricsServiceRequest.Get().(*otlpcollectormetrics.ExportMetricsServiceRequest)
-}
-
-func DeleteOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.ResourceMetrics {
- DeleteOrigResourceMetrics(orig.ResourceMetrics[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolExportMetricsServiceRequest.Put(orig)
- }
-}
-
-func CopyOrigExportMetricsServiceRequest(dest, src *otlpcollectormetrics.ExportMetricsServiceRequest) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.ResourceMetrics = CopyOrigResourceMetricsSlice(dest.ResourceMetrics, src.ResourceMetrics)
-}
-
-func GenTestOrigExportMetricsServiceRequest() *otlpcollectormetrics.ExportMetricsServiceRequest {
- orig := NewOrigExportMetricsServiceRequest()
- orig.ResourceMetrics = GenerateOrigTestResourceMetricsSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.ResourceMetrics) > 0 {
- dest.WriteObjectField("resourceMetrics")
- dest.WriteArrayStart()
- MarshalJSONOrigResourceMetrics(orig.ResourceMetrics[0], dest)
- for i := 1; i < len(orig.ResourceMetrics); i++ {
- dest.WriteMore()
- MarshalJSONOrigResourceMetrics(orig.ResourceMetrics[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigMetrics unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resourceMetrics", "resource_metrics":
- for iter.ReadArray() {
- orig.ResourceMetrics = append(orig.ResourceMetrics, NewOrigResourceMetrics())
- UnmarshalJSONOrigResourceMetrics(orig.ResourceMetrics[len(orig.ResourceMetrics)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest) int {
- var n int
- var l int
- _ = l
- for i := range orig.ResourceMetrics {
- l = SizeProtoOrigResourceMetrics(orig.ResourceMetrics[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
- l = MarshalProtoOrigResourceMetrics(orig.ResourceMetrics[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ResourceMetrics = append(orig.ResourceMetrics, NewOrigResourceMetrics())
- err = UnmarshalProtoOrigResourceMetrics(orig.ResourceMetrics[len(orig.ResourceMetrics)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
+func GenTestMetricsWrapper() MetricsWrapper {
+ return NewMetricsWrapper(GenTestExportMetricsServiceRequest(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsserviceresponse.go
deleted file mode 100644
index bdc770f47..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsserviceresponse.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportMetricsServiceResponse = sync.Pool{
- New: func() any {
- return &otlpcollectormetrics.ExportMetricsServiceResponse{}
- },
- }
-)
-
-func NewOrigExportMetricsServiceResponse() *otlpcollectormetrics.ExportMetricsServiceResponse {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectormetrics.ExportMetricsServiceResponse{}
- }
- return protoPoolExportMetricsServiceResponse.Get().(*otlpcollectormetrics.ExportMetricsServiceResponse)
-}
-
-func DeleteOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigExportMetricsPartialSuccess(&orig.PartialSuccess, false)
-
- orig.Reset()
- if nullable {
- protoPoolExportMetricsServiceResponse.Put(orig)
- }
-}
-
-func CopyOrigExportMetricsServiceResponse(dest, src *otlpcollectormetrics.ExportMetricsServiceResponse) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigExportMetricsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
-}
-
-func GenTestOrigExportMetricsServiceResponse() *otlpcollectormetrics.ExportMetricsServiceResponse {
- orig := NewOrigExportMetricsServiceResponse()
- orig.PartialSuccess = *GenTestOrigExportMetricsPartialSuccess()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("partialSuccess")
- MarshalJSONOrigExportMetricsPartialSuccess(&orig.PartialSuccess, dest)
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "partialSuccess", "partial_success":
- UnmarshalJSONOrigExportMetricsPartialSuccess(&orig.PartialSuccess, iter)
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigExportMetricsPartialSuccess(&orig.PartialSuccess)
- n += 1 + proto.Sov(uint64(l)) + l
- return n
-}
-
-func MarshalProtoOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigExportMetricsPartialSuccess(&orig.PartialSuccess, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigExportMetricsPartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilespartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilespartialsuccess.go
deleted file mode 100644
index f1305cc2d..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilespartialsuccess.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportProfilesPartialSuccess = sync.Pool{
- New: func() any {
- return &otlpcollectorprofiles.ExportProfilesPartialSuccess{}
- },
- }
-)
-
-func NewOrigExportProfilesPartialSuccess() *otlpcollectorprofiles.ExportProfilesPartialSuccess {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectorprofiles.ExportProfilesPartialSuccess{}
- }
- return protoPoolExportProfilesPartialSuccess.Get().(*otlpcollectorprofiles.ExportProfilesPartialSuccess)
-}
-
-func DeleteOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolExportProfilesPartialSuccess.Put(orig)
- }
-}
-
-func CopyOrigExportProfilesPartialSuccess(dest, src *otlpcollectorprofiles.ExportProfilesPartialSuccess) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.RejectedProfiles = src.RejectedProfiles
- dest.ErrorMessage = src.ErrorMessage
-}
-
-func GenTestOrigExportProfilesPartialSuccess() *otlpcollectorprofiles.ExportProfilesPartialSuccess {
- orig := NewOrigExportProfilesPartialSuccess()
- orig.RejectedProfiles = int64(13)
- orig.ErrorMessage = "test_errormessage"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.RejectedProfiles != int64(0) {
- dest.WriteObjectField("rejectedProfiles")
- dest.WriteInt64(orig.RejectedProfiles)
- }
- if orig.ErrorMessage != "" {
- dest.WriteObjectField("errorMessage")
- dest.WriteString(orig.ErrorMessage)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "rejectedProfiles", "rejected_profiles":
- orig.RejectedProfiles = iter.ReadInt64()
- case "errorMessage", "error_message":
- orig.ErrorMessage = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess) int {
- var n int
- var l int
- _ = l
- if orig.RejectedProfiles != 0 {
- n += 1 + proto.Sov(uint64(orig.RejectedProfiles))
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.RejectedProfiles != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedProfiles))
- pos--
- buf[pos] = 0x8
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.ErrorMessage)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.RejectedProfiles = int64(num)
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ErrorMessage = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go
index 0cf9fdc9f..8533626be 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go
@@ -6,206 +6,23 @@
package internal
-import (
- "fmt"
- "sync"
-
- otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type Profiles struct {
- orig *otlpcollectorprofiles.ExportProfilesServiceRequest
+type ProfilesWrapper struct {
+ orig *ExportProfilesServiceRequest
state *State
}
-func GetOrigProfiles(ms Profiles) *otlpcollectorprofiles.ExportProfilesServiceRequest {
+func GetProfilesOrig(ms ProfilesWrapper) *ExportProfilesServiceRequest {
return ms.orig
}
-func GetProfilesState(ms Profiles) *State {
+func GetProfilesState(ms ProfilesWrapper) *State {
return ms.state
}
-func NewProfiles(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, state *State) Profiles {
- return Profiles{orig: orig, state: state}
-}
-
-var (
- protoPoolExportProfilesServiceRequest = sync.Pool{
- New: func() any {
- return &otlpcollectorprofiles.ExportProfilesServiceRequest{}
- },
- }
-)
-
-func NewOrigExportProfilesServiceRequest() *otlpcollectorprofiles.ExportProfilesServiceRequest {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectorprofiles.ExportProfilesServiceRequest{}
- }
- return protoPoolExportProfilesServiceRequest.Get().(*otlpcollectorprofiles.ExportProfilesServiceRequest)
+func NewProfilesWrapper(orig *ExportProfilesServiceRequest, state *State) ProfilesWrapper {
+ return ProfilesWrapper{orig: orig, state: state}
}
-func DeleteOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.ResourceProfiles {
- DeleteOrigResourceProfiles(orig.ResourceProfiles[i], true)
- }
- DeleteOrigProfilesDictionary(&orig.Dictionary, false)
-
- orig.Reset()
- if nullable {
- protoPoolExportProfilesServiceRequest.Put(orig)
- }
-}
-
-func CopyOrigExportProfilesServiceRequest(dest, src *otlpcollectorprofiles.ExportProfilesServiceRequest) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.ResourceProfiles = CopyOrigResourceProfilesSlice(dest.ResourceProfiles, src.ResourceProfiles)
- CopyOrigProfilesDictionary(&dest.Dictionary, &src.Dictionary)
-}
-
-func GenTestOrigExportProfilesServiceRequest() *otlpcollectorprofiles.ExportProfilesServiceRequest {
- orig := NewOrigExportProfilesServiceRequest()
- orig.ResourceProfiles = GenerateOrigTestResourceProfilesSlice()
- orig.Dictionary = *GenTestOrigProfilesDictionary()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.ResourceProfiles) > 0 {
- dest.WriteObjectField("resourceProfiles")
- dest.WriteArrayStart()
- MarshalJSONOrigResourceProfiles(orig.ResourceProfiles[0], dest)
- for i := 1; i < len(orig.ResourceProfiles); i++ {
- dest.WriteMore()
- MarshalJSONOrigResourceProfiles(orig.ResourceProfiles[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectField("dictionary")
- MarshalJSONOrigProfilesDictionary(&orig.Dictionary, dest)
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigProfiles unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resourceProfiles", "resource_profiles":
- for iter.ReadArray() {
- orig.ResourceProfiles = append(orig.ResourceProfiles, NewOrigResourceProfiles())
- UnmarshalJSONOrigResourceProfiles(orig.ResourceProfiles[len(orig.ResourceProfiles)-1], iter)
- }
-
- case "dictionary":
- UnmarshalJSONOrigProfilesDictionary(&orig.Dictionary, iter)
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest) int {
- var n int
- var l int
- _ = l
- for i := range orig.ResourceProfiles {
- l = SizeProtoOrigResourceProfiles(orig.ResourceProfiles[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = SizeProtoOrigProfilesDictionary(&orig.Dictionary)
- n += 1 + proto.Sov(uint64(l)) + l
- return n
-}
-
-func MarshalProtoOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
- l = MarshalProtoOrigResourceProfiles(orig.ResourceProfiles[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
-
- l = MarshalProtoOrigProfilesDictionary(&orig.Dictionary, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
-
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ResourceProfiles = append(orig.ResourceProfiles, NewOrigResourceProfiles())
- err = UnmarshalProtoOrigResourceProfiles(orig.ResourceProfiles[len(orig.ResourceProfiles)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigProfilesDictionary(&orig.Dictionary, buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
+func GenTestProfilesWrapper() ProfilesWrapper {
+ return NewProfilesWrapper(GenTestExportProfilesServiceRequest(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesserviceresponse.go
deleted file mode 100644
index 57f666fb0..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesserviceresponse.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportProfilesServiceResponse = sync.Pool{
- New: func() any {
- return &otlpcollectorprofiles.ExportProfilesServiceResponse{}
- },
- }
-)
-
-func NewOrigExportProfilesServiceResponse() *otlpcollectorprofiles.ExportProfilesServiceResponse {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectorprofiles.ExportProfilesServiceResponse{}
- }
- return protoPoolExportProfilesServiceResponse.Get().(*otlpcollectorprofiles.ExportProfilesServiceResponse)
-}
-
-func DeleteOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigExportProfilesPartialSuccess(&orig.PartialSuccess, false)
-
- orig.Reset()
- if nullable {
- protoPoolExportProfilesServiceResponse.Put(orig)
- }
-}
-
-func CopyOrigExportProfilesServiceResponse(dest, src *otlpcollectorprofiles.ExportProfilesServiceResponse) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigExportProfilesPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
-}
-
-func GenTestOrigExportProfilesServiceResponse() *otlpcollectorprofiles.ExportProfilesServiceResponse {
- orig := NewOrigExportProfilesServiceResponse()
- orig.PartialSuccess = *GenTestOrigExportProfilesPartialSuccess()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("partialSuccess")
- MarshalJSONOrigExportProfilesPartialSuccess(&orig.PartialSuccess, dest)
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "partialSuccess", "partial_success":
- UnmarshalJSONOrigExportProfilesPartialSuccess(&orig.PartialSuccess, iter)
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigExportProfilesPartialSuccess(&orig.PartialSuccess)
- n += 1 + proto.Sov(uint64(l)) + l
- return n
-}
-
-func MarshalProtoOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigExportProfilesPartialSuccess(&orig.PartialSuccess, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigExportProfilesPartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttracepartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttracepartialsuccess.go
deleted file mode 100644
index bd9774602..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttracepartialsuccess.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportTracePartialSuccess = sync.Pool{
- New: func() any {
- return &otlpcollectortrace.ExportTracePartialSuccess{}
- },
- }
-)
-
-func NewOrigExportTracePartialSuccess() *otlpcollectortrace.ExportTracePartialSuccess {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectortrace.ExportTracePartialSuccess{}
- }
- return protoPoolExportTracePartialSuccess.Get().(*otlpcollectortrace.ExportTracePartialSuccess)
-}
-
-func DeleteOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolExportTracePartialSuccess.Put(orig)
- }
-}
-
-func CopyOrigExportTracePartialSuccess(dest, src *otlpcollectortrace.ExportTracePartialSuccess) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.RejectedSpans = src.RejectedSpans
- dest.ErrorMessage = src.ErrorMessage
-}
-
-func GenTestOrigExportTracePartialSuccess() *otlpcollectortrace.ExportTracePartialSuccess {
- orig := NewOrigExportTracePartialSuccess()
- orig.RejectedSpans = int64(13)
- orig.ErrorMessage = "test_errormessage"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.RejectedSpans != int64(0) {
- dest.WriteObjectField("rejectedSpans")
- dest.WriteInt64(orig.RejectedSpans)
- }
- if orig.ErrorMessage != "" {
- dest.WriteObjectField("errorMessage")
- dest.WriteString(orig.ErrorMessage)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportPartialSuccess unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "rejectedSpans", "rejected_spans":
- orig.RejectedSpans = iter.ReadInt64()
- case "errorMessage", "error_message":
- orig.ErrorMessage = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess) int {
- var n int
- var l int
- _ = l
- if orig.RejectedSpans != 0 {
- n += 1 + proto.Sov(uint64(orig.RejectedSpans))
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.RejectedSpans != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedSpans))
- pos--
- buf[pos] = 0x8
- }
- l = len(orig.ErrorMessage)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.ErrorMessage)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.RejectedSpans = int64(num)
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ErrorMessage = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
index 4ce9efc1e..75d82979d 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
@@ -6,174 +6,23 @@
package internal
-import (
- "fmt"
- "sync"
-
- otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type Traces struct {
- orig *otlpcollectortrace.ExportTraceServiceRequest
+type TracesWrapper struct {
+ orig *ExportTraceServiceRequest
state *State
}
-func GetOrigTraces(ms Traces) *otlpcollectortrace.ExportTraceServiceRequest {
+func GetTracesOrig(ms TracesWrapper) *ExportTraceServiceRequest {
return ms.orig
}
-func GetTracesState(ms Traces) *State {
+func GetTracesState(ms TracesWrapper) *State {
return ms.state
}
-func NewTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *State) Traces {
- return Traces{orig: orig, state: state}
+func NewTracesWrapper(orig *ExportTraceServiceRequest, state *State) TracesWrapper {
+ return TracesWrapper{orig: orig, state: state}
}
-var (
- protoPoolExportTraceServiceRequest = sync.Pool{
- New: func() any {
- return &otlpcollectortrace.ExportTraceServiceRequest{}
- },
- }
-)
-
-func NewOrigExportTraceServiceRequest() *otlpcollectortrace.ExportTraceServiceRequest {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectortrace.ExportTraceServiceRequest{}
- }
- return protoPoolExportTraceServiceRequest.Get().(*otlpcollectortrace.ExportTraceServiceRequest)
-}
-
-func DeleteOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.ResourceSpans {
- DeleteOrigResourceSpans(orig.ResourceSpans[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolExportTraceServiceRequest.Put(orig)
- }
-}
-
-func CopyOrigExportTraceServiceRequest(dest, src *otlpcollectortrace.ExportTraceServiceRequest) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.ResourceSpans = CopyOrigResourceSpansSlice(dest.ResourceSpans, src.ResourceSpans)
-}
-
-func GenTestOrigExportTraceServiceRequest() *otlpcollectortrace.ExportTraceServiceRequest {
- orig := NewOrigExportTraceServiceRequest()
- orig.ResourceSpans = GenerateOrigTestResourceSpansSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.ResourceSpans) > 0 {
- dest.WriteObjectField("resourceSpans")
- dest.WriteArrayStart()
- MarshalJSONOrigResourceSpans(orig.ResourceSpans[0], dest)
- for i := 1; i < len(orig.ResourceSpans); i++ {
- dest.WriteMore()
- MarshalJSONOrigResourceSpans(orig.ResourceSpans[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigTraces unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resourceSpans", "resource_spans":
- for iter.ReadArray() {
- orig.ResourceSpans = append(orig.ResourceSpans, NewOrigResourceSpans())
- UnmarshalJSONOrigResourceSpans(orig.ResourceSpans[len(orig.ResourceSpans)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest) int {
- var n int
- var l int
- _ = l
- for i := range orig.ResourceSpans {
- l = SizeProtoOrigResourceSpans(orig.ResourceSpans[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
- l = MarshalProtoOrigResourceSpans(orig.ResourceSpans[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ResourceSpans = append(orig.ResourceSpans, NewOrigResourceSpans())
- err = UnmarshalProtoOrigResourceSpans(orig.ResourceSpans[len(orig.ResourceSpans)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
+func GenTestTracesWrapper() TracesWrapper {
+ return NewTracesWrapper(GenTestExportTraceServiceRequest(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceserviceresponse.go
deleted file mode 100644
index 293386b30..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceserviceresponse.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolExportTraceServiceResponse = sync.Pool{
- New: func() any {
- return &otlpcollectortrace.ExportTraceServiceResponse{}
- },
- }
-)
-
-func NewOrigExportTraceServiceResponse() *otlpcollectortrace.ExportTraceServiceResponse {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcollectortrace.ExportTraceServiceResponse{}
- }
- return protoPoolExportTraceServiceResponse.Get().(*otlpcollectortrace.ExportTraceServiceResponse)
-}
-
-func DeleteOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigExportTracePartialSuccess(&orig.PartialSuccess, false)
-
- orig.Reset()
- if nullable {
- protoPoolExportTraceServiceResponse.Put(orig)
- }
-}
-
-func CopyOrigExportTraceServiceResponse(dest, src *otlpcollectortrace.ExportTraceServiceResponse) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigExportTracePartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
-}
-
-func GenTestOrigExportTraceServiceResponse() *otlpcollectortrace.ExportTraceServiceResponse {
- orig := NewOrigExportTraceServiceResponse()
- orig.PartialSuccess = *GenTestOrigExportTracePartialSuccess()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("partialSuccess")
- MarshalJSONOrigExportTracePartialSuccess(&orig.PartialSuccess, dest)
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigExportResponse unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "partialSuccess", "partial_success":
- UnmarshalJSONOrigExportTracePartialSuccess(&orig.PartialSuccess, iter)
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigExportTracePartialSuccess(&orig.PartialSuccess)
- n += 1 + proto.Sov(uint64(l)) + l
- return n
-}
-
-func MarshalProtoOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigExportTracePartialSuccess(&orig.PartialSuccess, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigExportTracePartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
index 9f21f74ff..8d0d6ea6c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
@@ -6,32 +6,28 @@
package internal
-type Float64Slice struct {
+type Float64SliceWrapper struct {
orig *[]float64
state *State
}
-func GetOrigFloat64Slice(ms Float64Slice) *[]float64 {
+func GetFloat64SliceOrig(ms Float64SliceWrapper) *[]float64 {
return ms.orig
}
-func GetFloat64SliceState(ms Float64Slice) *State {
+func GetFloat64SliceState(ms Float64SliceWrapper) *State {
return ms.state
}
-func NewFloat64Slice(orig *[]float64, state *State) Float64Slice {
- return Float64Slice{orig: orig, state: state}
+func NewFloat64SliceWrapper(orig *[]float64, state *State) Float64SliceWrapper {
+ return Float64SliceWrapper{orig: orig, state: state}
}
-func GenerateTestFloat64Slice() Float64Slice {
- orig := GenerateOrigTestFloat64Slice()
- return NewFloat64Slice(&orig, NewState())
+func GenTestFloat64SliceWrapper() Float64SliceWrapper {
+ orig := []float64{1.1, 2.2, 3.3}
+ return NewFloat64SliceWrapper(&orig, NewState())
}
-func CopyOrigFloat64Slice(dst, src []float64) []float64 {
- return append(dst[:0], src...)
-}
-
-func GenerateOrigTestFloat64Slice() []float64 {
+func GenTestFloat64Slice() []float64 {
return []float64{1.1, 2.2, 3.3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_functionslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_functionslice.go
deleted file mode 100644
index 4295cf216..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_functionslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigFunctionSlice(dest, src []*otlpprofiles.Function) []*otlpprofiles.Function {
- var newDest []*otlpprofiles.Function
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.Function, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigFunction()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigFunction(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigFunction()
- }
- }
- for i := range src {
- CopyOrigFunction(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestFunctionSlice() []*otlpprofiles.Function {
- orig := make([]*otlpprofiles.Function, 5)
- orig[0] = NewOrigFunction()
- orig[1] = GenTestOrigFunction()
- orig[2] = NewOrigFunction()
- orig[3] = GenTestOrigFunction()
- orig[4] = NewOrigFunction()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_gauge.go
deleted file mode 100644
index 7fd96f464..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_gauge.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolGauge = sync.Pool{
- New: func() any {
- return &otlpmetrics.Gauge{}
- },
- }
-)
-
-func NewOrigGauge() *otlpmetrics.Gauge {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.Gauge{}
- }
- return protoPoolGauge.Get().(*otlpmetrics.Gauge)
-}
-
-func DeleteOrigGauge(orig *otlpmetrics.Gauge, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.DataPoints {
- DeleteOrigNumberDataPoint(orig.DataPoints[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolGauge.Put(orig)
- }
-}
-
-func CopyOrigGauge(dest, src *otlpmetrics.Gauge) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.DataPoints = CopyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints)
-}
-
-func GenTestOrigGauge() *otlpmetrics.Gauge {
- orig := NewOrigGauge()
- orig.DataPoints = GenerateOrigTestNumberDataPointSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigGauge(orig *otlpmetrics.Gauge, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.DataPoints) > 0 {
- dest.WriteObjectField("dataPoints")
- dest.WriteArrayStart()
- MarshalJSONOrigNumberDataPoint(orig.DataPoints[0], dest)
- for i := 1; i < len(orig.DataPoints); i++ {
- dest.WriteMore()
- MarshalJSONOrigNumberDataPoint(orig.DataPoints[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigGauge unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigGauge(orig *otlpmetrics.Gauge, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "dataPoints", "data_points":
- for iter.ReadArray() {
- orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint())
- UnmarshalJSONOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigGauge(orig *otlpmetrics.Gauge) int {
- var n int
- var l int
- _ = l
- for i := range orig.DataPoints {
- l = SizeProtoOrigNumberDataPoint(orig.DataPoints[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigGauge(orig *otlpmetrics.Gauge, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.DataPoints) - 1; i >= 0; i-- {
- l = MarshalProtoOrigNumberDataPoint(orig.DataPoints[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigGauge(orig *otlpmetrics.Gauge, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.DataPoints = append(orig.DataPoints, NewOrigNumberDataPoint())
- err = UnmarshalProtoOrigNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogram.go
deleted file mode 100644
index 006c222bf..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogram.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolHistogram = sync.Pool{
- New: func() any {
- return &otlpmetrics.Histogram{}
- },
- }
-)
-
-func NewOrigHistogram() *otlpmetrics.Histogram {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.Histogram{}
- }
- return protoPoolHistogram.Get().(*otlpmetrics.Histogram)
-}
-
-func DeleteOrigHistogram(orig *otlpmetrics.Histogram, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.DataPoints {
- DeleteOrigHistogramDataPoint(orig.DataPoints[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolHistogram.Put(orig)
- }
-}
-
-func CopyOrigHistogram(dest, src *otlpmetrics.Histogram) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.DataPoints = CopyOrigHistogramDataPointSlice(dest.DataPoints, src.DataPoints)
- dest.AggregationTemporality = src.AggregationTemporality
-}
-
-func GenTestOrigHistogram() *otlpmetrics.Histogram {
- orig := NewOrigHistogram()
- orig.DataPoints = GenerateOrigTestHistogramDataPointSlice()
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1)
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigHistogram(orig *otlpmetrics.Histogram, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.DataPoints) > 0 {
- dest.WriteObjectField("dataPoints")
- dest.WriteArrayStart()
- MarshalJSONOrigHistogramDataPoint(orig.DataPoints[0], dest)
- for i := 1; i < len(orig.DataPoints); i++ {
- dest.WriteMore()
- MarshalJSONOrigHistogramDataPoint(orig.DataPoints[i], dest)
- }
- dest.WriteArrayEnd()
- }
-
- if int32(orig.AggregationTemporality) != 0 {
- dest.WriteObjectField("aggregationTemporality")
- dest.WriteInt32(int32(orig.AggregationTemporality))
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigHistogram unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigHistogram(orig *otlpmetrics.Histogram, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "dataPoints", "data_points":
- for iter.ReadArray() {
- orig.DataPoints = append(orig.DataPoints, NewOrigHistogramDataPoint())
- UnmarshalJSONOrigHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
- }
-
- case "aggregationTemporality", "aggregation_temporality":
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value))
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigHistogram(orig *otlpmetrics.Histogram) int {
- var n int
- var l int
- _ = l
- for i := range orig.DataPoints {
- l = SizeProtoOrigHistogramDataPoint(orig.DataPoints[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.AggregationTemporality != 0 {
- n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
- }
- return n
-}
-
-func MarshalProtoOrigHistogram(orig *otlpmetrics.Histogram, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.DataPoints) - 1; i >= 0; i-- {
- l = MarshalProtoOrigHistogramDataPoint(orig.DataPoints[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- if orig.AggregationTemporality != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
- pos--
- buf[pos] = 0x10
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigHistogram(orig *otlpmetrics.Histogram, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.DataPoints = append(orig.DataPoints, NewOrigHistogramDataPoint())
- err = UnmarshalProtoOrigHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num)
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapointslice.go
deleted file mode 100644
index 210f02eab..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_histogramdatapointslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigHistogramDataPointSlice(dest, src []*otlpmetrics.HistogramDataPoint) []*otlpmetrics.HistogramDataPoint {
- var newDest []*otlpmetrics.HistogramDataPoint
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.HistogramDataPoint, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigHistogramDataPoint()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigHistogramDataPoint(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigHistogramDataPoint()
- }
- }
- for i := range src {
- CopyOrigHistogramDataPoint(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestHistogramDataPointSlice() []*otlpmetrics.HistogramDataPoint {
- orig := make([]*otlpmetrics.HistogramDataPoint, 5)
- orig[0] = NewOrigHistogramDataPoint()
- orig[1] = GenTestOrigHistogramDataPoint()
- orig[2] = NewOrigHistogramDataPoint()
- orig[3] = GenTestOrigHistogramDataPoint()
- orig[4] = NewOrigHistogramDataPoint()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
index f9fea6cb8..5fbe72e12 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
@@ -6,266 +6,23 @@
package internal
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type InstrumentationScope struct {
- orig *otlpcommon.InstrumentationScope
+type InstrumentationScopeWrapper struct {
+ orig *InstrumentationScope
state *State
}
-func GetOrigInstrumentationScope(ms InstrumentationScope) *otlpcommon.InstrumentationScope {
+func GetInstrumentationScopeOrig(ms InstrumentationScopeWrapper) *InstrumentationScope {
return ms.orig
}
-func GetInstrumentationScopeState(ms InstrumentationScope) *State {
+func GetInstrumentationScopeState(ms InstrumentationScopeWrapper) *State {
return ms.state
}
-func NewInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *State) InstrumentationScope {
- return InstrumentationScope{orig: orig, state: state}
-}
-
-var (
- protoPoolInstrumentationScope = sync.Pool{
- New: func() any {
- return &otlpcommon.InstrumentationScope{}
- },
- }
-)
-
-func NewOrigInstrumentationScope() *otlpcommon.InstrumentationScope {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.InstrumentationScope{}
- }
- return protoPoolInstrumentationScope.Get().(*otlpcommon.InstrumentationScope)
+func NewInstrumentationScopeWrapper(orig *InstrumentationScope, state *State) InstrumentationScopeWrapper {
+ return InstrumentationScopeWrapper{orig: orig, state: state}
}
-func DeleteOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
- }
-
- orig.Reset()
- if nullable {
- protoPoolInstrumentationScope.Put(orig)
- }
-}
-
-func CopyOrigInstrumentationScope(dest, src *otlpcommon.InstrumentationScope) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Name = src.Name
- dest.Version = src.Version
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
-}
-
-func GenTestOrigInstrumentationScope() *otlpcommon.InstrumentationScope {
- orig := NewOrigInstrumentationScope()
- orig.Name = "test_name"
- orig.Version = "test_version"
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.DroppedAttributesCount = uint32(13)
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.Name != "" {
- dest.WriteObjectField("name")
- dest.WriteString(orig.Name)
- }
- if orig.Version != "" {
- dest.WriteObjectField("version")
- dest.WriteString(orig.Version)
- }
- if len(orig.Attributes) > 0 {
- dest.WriteObjectField("attributes")
- dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
- for i := 1; i < len(orig.Attributes); i++ {
- dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.DroppedAttributesCount != uint32(0) {
- dest.WriteObjectField("droppedAttributesCount")
- dest.WriteUint32(orig.DroppedAttributesCount)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigInstrumentationScope unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "name":
- orig.Name = iter.ReadString()
- case "version":
- orig.Version = iter.ReadString()
- case "attributes":
- for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
- }
-
- case "droppedAttributesCount", "dropped_attributes_count":
- orig.DroppedAttributesCount = iter.ReadUint32()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope) int {
- var n int
- var l int
- _ = l
- l = len(orig.Name)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.Version)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.DroppedAttributesCount != 0 {
- n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
- }
- return n
-}
-
-func MarshalProtoOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- l = len(orig.Name)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Name)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- l = len(orig.Version)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Version)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- if orig.DroppedAttributesCount != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
- pos--
- buf[pos] = 0x20
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigInstrumentationScope(orig *otlpcommon.InstrumentationScope, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Name = string(buf[startPos:pos])
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Version = string(buf[startPos:pos])
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 4:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.DroppedAttributesCount = uint32(num)
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
+func GenTestInstrumentationScopeWrapper() InstrumentationScopeWrapper {
+ return NewInstrumentationScopeWrapper(GenTestInstrumentationScope(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
index 86ceaa8c5..ed66b2007 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
@@ -6,32 +6,28 @@
package internal
-type Int32Slice struct {
+type Int32SliceWrapper struct {
orig *[]int32
state *State
}
-func GetOrigInt32Slice(ms Int32Slice) *[]int32 {
+func GetInt32SliceOrig(ms Int32SliceWrapper) *[]int32 {
return ms.orig
}
-func GetInt32SliceState(ms Int32Slice) *State {
+func GetInt32SliceState(ms Int32SliceWrapper) *State {
return ms.state
}
-func NewInt32Slice(orig *[]int32, state *State) Int32Slice {
- return Int32Slice{orig: orig, state: state}
+func NewInt32SliceWrapper(orig *[]int32, state *State) Int32SliceWrapper {
+ return Int32SliceWrapper{orig: orig, state: state}
}
-func GenerateTestInt32Slice() Int32Slice {
- orig := GenerateOrigTestInt32Slice()
- return NewInt32Slice(&orig, NewState())
+func GenTestInt32SliceWrapper() Int32SliceWrapper {
+ orig := []int32{1, 2, 3}
+ return NewInt32SliceWrapper(&orig, NewState())
}
-func CopyOrigInt32Slice(dst, src []int32) []int32 {
- return append(dst[:0], src...)
-}
-
-func GenerateOrigTestInt32Slice() []int32 {
+func GenTestInt32Slice() []int32 {
return []int32{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
index ec4df4f49..9174f8632 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
@@ -6,32 +6,28 @@
package internal
-type Int64Slice struct {
+type Int64SliceWrapper struct {
orig *[]int64
state *State
}
-func GetOrigInt64Slice(ms Int64Slice) *[]int64 {
+func GetInt64SliceOrig(ms Int64SliceWrapper) *[]int64 {
return ms.orig
}
-func GetInt64SliceState(ms Int64Slice) *State {
+func GetInt64SliceState(ms Int64SliceWrapper) *State {
return ms.state
}
-func NewInt64Slice(orig *[]int64, state *State) Int64Slice {
- return Int64Slice{orig: orig, state: state}
+func NewInt64SliceWrapper(orig *[]int64, state *State) Int64SliceWrapper {
+ return Int64SliceWrapper{orig: orig, state: state}
}
-func GenerateTestInt64Slice() Int64Slice {
- orig := GenerateOrigTestInt64Slice()
- return NewInt64Slice(&orig, NewState())
+func GenTestInt64SliceWrapper() Int64SliceWrapper {
+ orig := []int64{1, 2, 3}
+ return NewInt64SliceWrapper(&orig, NewState())
}
-func CopyOrigInt64Slice(dst, src []int64) []int64 {
- return append(dst[:0], src...)
-}
-
-func GenerateOrigTestInt64Slice() []int64 {
+func GenTestInt64Slice() []int64 {
return []int64{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalue.go
deleted file mode 100644
index 6150f448f..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalue.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolKeyValue = sync.Pool{
- New: func() any {
- return &otlpcommon.KeyValue{}
- },
- }
-)
-
-func NewOrigKeyValue() *otlpcommon.KeyValue {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.KeyValue{}
- }
- return protoPoolKeyValue.Get().(*otlpcommon.KeyValue)
-}
-
-func DeleteOrigKeyValue(orig *otlpcommon.KeyValue, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigAnyValue(&orig.Value, false)
-
- orig.Reset()
- if nullable {
- protoPoolKeyValue.Put(orig)
- }
-}
-
-func CopyOrigKeyValue(dest, src *otlpcommon.KeyValue) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Key = src.Key
- CopyOrigAnyValue(&dest.Value, &src.Value)
-}
-
-func GenTestOrigKeyValue() *otlpcommon.KeyValue {
- orig := NewOrigKeyValue()
- orig.Key = "test_key"
- orig.Value = *GenTestOrigAnyValue()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigKeyValue(orig *otlpcommon.KeyValue, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.Key != "" {
- dest.WriteObjectField("key")
- dest.WriteString(orig.Key)
- }
- dest.WriteObjectField("value")
- MarshalJSONOrigAnyValue(&orig.Value, dest)
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigAttribute unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigKeyValue(orig *otlpcommon.KeyValue, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "key":
- orig.Key = iter.ReadString()
- case "value":
- UnmarshalJSONOrigAnyValue(&orig.Value, iter)
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigKeyValue(orig *otlpcommon.KeyValue) int {
- var n int
- var l int
- _ = l
- l = len(orig.Key)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = SizeProtoOrigAnyValue(&orig.Value)
- n += 1 + proto.Sov(uint64(l)) + l
- return n
-}
-
-func MarshalProtoOrigKeyValue(orig *otlpcommon.KeyValue, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- l = len(orig.Key)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Key)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
-
- l = MarshalProtoOrigAnyValue(&orig.Value, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
-
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigKeyValue(orig *otlpcommon.KeyValue, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Key = string(buf[startPos:pos])
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigAnyValue(&orig.Value, buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvaluelist.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvaluelist.go
deleted file mode 100644
index eed254a6c..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvaluelist.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolKeyValueList = sync.Pool{
- New: func() any {
- return &otlpcommon.KeyValueList{}
- },
- }
-)
-
-func NewOrigKeyValueList() *otlpcommon.KeyValueList {
- if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.KeyValueList{}
- }
- return protoPoolKeyValueList.Get().(*otlpcommon.KeyValueList)
-}
-
-func DeleteOrigKeyValueList(orig *otlpcommon.KeyValueList, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.Values {
- DeleteOrigKeyValue(&orig.Values[i], false)
- }
-
- orig.Reset()
- if nullable {
- protoPoolKeyValueList.Put(orig)
- }
-}
-
-func CopyOrigKeyValueList(dest, src *otlpcommon.KeyValueList) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Values = CopyOrigKeyValueSlice(dest.Values, src.Values)
-}
-
-func GenTestOrigKeyValueList() *otlpcommon.KeyValueList {
- orig := NewOrigKeyValueList()
- orig.Values = GenerateOrigTestKeyValueSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigKeyValueList(orig *otlpcommon.KeyValueList, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.Values) > 0 {
- dest.WriteObjectField("values")
- dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Values[0], dest)
- for i := 1; i < len(orig.Values); i++ {
- dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Values[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigKeyValueList unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigKeyValueList(orig *otlpcommon.KeyValueList, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "values":
- for iter.ReadArray() {
- orig.Values = append(orig.Values, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Values[len(orig.Values)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigKeyValueList(orig *otlpcommon.KeyValueList) int {
- var n int
- var l int
- _ = l
- for i := range orig.Values {
- l = SizeProtoOrigKeyValue(&orig.Values[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigKeyValueList(orig *otlpcommon.KeyValueList, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.Values) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Values[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigKeyValueList(orig *otlpcommon.KeyValueList, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Values = append(orig.Values, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Values[len(orig.Values)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueslice.go
deleted file mode 100644
index 5143ee114..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_keyvalueslice.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-func CopyOrigKeyValueSlice(dest, src []otlpcommon.KeyValue) []otlpcommon.KeyValue {
- var newDest []otlpcommon.KeyValue
- if cap(dest) < len(src) {
- newDest = make([]otlpcommon.KeyValue, len(src))
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigKeyValue(&dest[i], false)
- }
- }
- for i := range src {
- CopyOrigKeyValue(&newDest[i], &src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestKeyValueSlice() []otlpcommon.KeyValue {
- orig := make([]otlpcommon.KeyValue, 5)
- orig[1] = *GenTestOrigKeyValue()
- orig[3] = *GenTestOrigKeyValue()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_lineslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_lineslice.go
deleted file mode 100644
index e43c1abab..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_lineslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigLineSlice(dest, src []*otlpprofiles.Line) []*otlpprofiles.Line {
- var newDest []*otlpprofiles.Line
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.Line, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLine()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigLine(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLine()
- }
- }
- for i := range src {
- CopyOrigLine(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestLineSlice() []*otlpprofiles.Line {
- orig := make([]*otlpprofiles.Line, 5)
- orig[0] = NewOrigLine()
- orig[1] = GenTestOrigLine()
- orig[2] = NewOrigLine()
- orig[3] = GenTestOrigLine()
- orig[4] = NewOrigLine()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_link.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_link.go
deleted file mode 100644
index 6fd643972..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_link.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolLink = sync.Pool{
- New: func() any {
- return &otlpprofiles.Link{}
- },
- }
-)
-
-func NewOrigLink() *otlpprofiles.Link {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.Link{}
- }
- return protoPoolLink.Get().(*otlpprofiles.Link)
-}
-
-func DeleteOrigLink(orig *otlpprofiles.Link, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigTraceID(&orig.TraceId, false)
- DeleteOrigSpanID(&orig.SpanId, false)
-
- orig.Reset()
- if nullable {
- protoPoolLink.Put(orig)
- }
-}
-
-func CopyOrigLink(dest, src *otlpprofiles.Link) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
-}
-
-func GenTestOrigLink() *otlpprofiles.Link {
- orig := NewOrigLink()
- orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
- orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigLink(orig *otlpprofiles.Link, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.TraceId != data.TraceID([16]byte{}) {
- dest.WriteObjectField("traceId")
- MarshalJSONOrigTraceID(&orig.TraceId, dest)
- }
- if orig.SpanId != data.SpanID([8]byte{}) {
- dest.WriteObjectField("spanId")
- MarshalJSONOrigSpanID(&orig.SpanId, dest)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigLink unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigLink(orig *otlpprofiles.Link, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "traceId", "trace_id":
- UnmarshalJSONOrigTraceID(&orig.TraceId, iter)
- case "spanId", "span_id":
- UnmarshalJSONOrigSpanID(&orig.SpanId, iter)
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigLink(orig *otlpprofiles.Link) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigTraceID(&orig.TraceId)
- n += 1 + proto.Sov(uint64(l)) + l
- l = SizeProtoOrigSpanID(&orig.SpanId)
- n += 1 + proto.Sov(uint64(l)) + l
- return n
-}
-
-func MarshalProtoOrigLink(orig *otlpprofiles.Link, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigTraceID(&orig.TraceId, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- l = MarshalProtoOrigSpanID(&orig.SpanId, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
-
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigLink(orig *otlpprofiles.Link, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigTraceID(&orig.TraceId, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigSpanID(&orig.SpanId, buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_linkslice.go
deleted file mode 100644
index 27b981ec1..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_linkslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigLinkSlice(dest, src []*otlpprofiles.Link) []*otlpprofiles.Link {
- var newDest []*otlpprofiles.Link
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.Link, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLink()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigLink(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLink()
- }
- }
- for i := range src {
- CopyOrigLink(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestLinkSlice() []*otlpprofiles.Link {
- orig := make([]*otlpprofiles.Link, 5)
- orig[0] = NewOrigLink()
- orig[1] = GenTestOrigLink()
- orig[2] = NewOrigLink()
- orig[3] = GenTestOrigLink()
- orig[4] = NewOrigLink()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_location.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_location.go
deleted file mode 100644
index fa88077d2..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_location.go
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolLocation = sync.Pool{
- New: func() any {
- return &otlpprofiles.Location{}
- },
- }
- ProtoPoolLocation_MappingIndex = sync.Pool{
- New: func() any {
- return &otlpprofiles.Location_MappingIndex{}
- },
- }
-)
-
-func NewOrigLocation() *otlpprofiles.Location {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.Location{}
- }
- return protoPoolLocation.Get().(*otlpprofiles.Location)
-}
-
-func DeleteOrigLocation(orig *otlpprofiles.Location, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- switch ov := orig.MappingIndex_.(type) {
- case *otlpprofiles.Location_MappingIndex:
- if UseProtoPooling.IsEnabled() {
- ov.MappingIndex = int32(0)
- ProtoPoolLocation_MappingIndex.Put(ov)
- }
-
- }
- for i := range orig.Line {
- DeleteOrigLine(orig.Line[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolLocation.Put(orig)
- }
-}
-
-func CopyOrigLocation(dest, src *otlpprofiles.Location) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- if srcMappingIndex, ok := src.MappingIndex_.(*otlpprofiles.Location_MappingIndex); ok {
- destMappingIndex, ok := dest.MappingIndex_.(*otlpprofiles.Location_MappingIndex)
- if !ok {
- destMappingIndex = &otlpprofiles.Location_MappingIndex{}
- dest.MappingIndex_ = destMappingIndex
- }
- destMappingIndex.MappingIndex = srcMappingIndex.MappingIndex
- } else {
- dest.MappingIndex_ = nil
- }
- dest.Address = src.Address
- dest.Line = CopyOrigLineSlice(dest.Line, src.Line)
- dest.IsFolded = src.IsFolded
- dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices)
-}
-
-func GenTestOrigLocation() *otlpprofiles.Location {
- orig := NewOrigLocation()
- orig.MappingIndex_ = &otlpprofiles.Location_MappingIndex{MappingIndex: int32(13)}
- orig.Address = uint64(13)
- orig.Line = GenerateOrigTestLineSlice()
- orig.IsFolded = true
- orig.AttributeIndices = GenerateOrigTestInt32Slice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigLocation(orig *otlpprofiles.Location, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig, ok := orig.MappingIndex_.(*otlpprofiles.Location_MappingIndex); ok {
- dest.WriteObjectField("mappingIndex")
- dest.WriteInt32(orig.MappingIndex)
- }
- if orig.Address != uint64(0) {
- dest.WriteObjectField("address")
- dest.WriteUint64(orig.Address)
- }
- if len(orig.Line) > 0 {
- dest.WriteObjectField("line")
- dest.WriteArrayStart()
- MarshalJSONOrigLine(orig.Line[0], dest)
- for i := 1; i < len(orig.Line); i++ {
- dest.WriteMore()
- MarshalJSONOrigLine(orig.Line[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.IsFolded != false {
- dest.WriteObjectField("isFolded")
- dest.WriteBool(orig.IsFolded)
- }
- if len(orig.AttributeIndices) > 0 {
- dest.WriteObjectField("attributeIndices")
- dest.WriteArrayStart()
- dest.WriteInt32(orig.AttributeIndices[0])
- for i := 1; i < len(orig.AttributeIndices); i++ {
- dest.WriteMore()
- dest.WriteInt32(orig.AttributeIndices[i])
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigLocation unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigLocation(orig *otlpprofiles.Location, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "mappingIndex", "mapping_index":
- {
- var ov *otlpprofiles.Location_MappingIndex
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpprofiles.Location_MappingIndex{}
- } else {
- ov = ProtoPoolLocation_MappingIndex.Get().(*otlpprofiles.Location_MappingIndex)
- }
- ov.MappingIndex = iter.ReadInt32()
- orig.MappingIndex_ = ov
- }
-
- case "address":
- orig.Address = iter.ReadUint64()
- case "line":
- for iter.ReadArray() {
- orig.Line = append(orig.Line, NewOrigLine())
- UnmarshalJSONOrigLine(orig.Line[len(orig.Line)-1], iter)
- }
-
- case "isFolded", "is_folded":
- orig.IsFolded = iter.ReadBool()
- case "attributeIndices", "attribute_indices":
- for iter.ReadArray() {
- orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigLocation(orig *otlpprofiles.Location) int {
- var n int
- var l int
- _ = l
- if orig, ok := orig.MappingIndex_.(*otlpprofiles.Location_MappingIndex); ok {
- _ = orig
- n += 1 + proto.Sov(uint64(orig.MappingIndex))
- }
- if orig.Address != 0 {
- n += 1 + proto.Sov(uint64(orig.Address))
- }
- for i := range orig.Line {
- l = SizeProtoOrigLine(orig.Line[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.IsFolded {
- n += 2
- }
- if len(orig.AttributeIndices) > 0 {
- l = 0
- for _, e := range orig.AttributeIndices {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig, ok := orig.MappingIndex_.(*otlpprofiles.Location_MappingIndex); ok {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.MappingIndex))
- pos--
- buf[pos] = 0x8
- }
- if orig.Address != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.Address))
- pos--
- buf[pos] = 0x10
- }
- for i := len(orig.Line) - 1; i >= 0; i-- {
- l = MarshalProtoOrigLine(orig.Line[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- if orig.IsFolded {
- pos--
- if orig.IsFolded {
- buf[pos] = 1
- } else {
- buf[pos] = 0
- }
- pos--
- buf[pos] = 0x20
- }
- l = len(orig.AttributeIndices)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x2a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigLocation(orig *otlpprofiles.Location, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
- var ov *otlpprofiles.Location_MappingIndex
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpprofiles.Location_MappingIndex{}
- } else {
- ov = ProtoPoolLocation_MappingIndex.Get().(*otlpprofiles.Location_MappingIndex)
- }
- ov.MappingIndex = int32(num)
- orig.MappingIndex_ = ov
-
- case 2:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.Address = uint64(num)
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Line = append(orig.Line, NewOrigLine())
- err = UnmarshalProtoOrigLine(orig.Line[len(orig.Line)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 4:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field IsFolded", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.IsFolded = num != 0
- case 5:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_locationslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_locationslice.go
deleted file mode 100644
index 83a10cc45..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_locationslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigLocationSlice(dest, src []*otlpprofiles.Location) []*otlpprofiles.Location {
- var newDest []*otlpprofiles.Location
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.Location, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLocation()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigLocation(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLocation()
- }
- }
- for i := range src {
- CopyOrigLocation(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestLocationSlice() []*otlpprofiles.Location {
- orig := make([]*otlpprofiles.Location, 5)
- orig[0] = NewOrigLocation()
- orig[1] = GenTestOrigLocation()
- orig[2] = NewOrigLocation()
- orig[3] = GenTestOrigLocation()
- orig[4] = NewOrigLocation()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecordslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecordslice.go
deleted file mode 100644
index 12770cf58..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_logrecordslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
-func CopyOrigLogRecordSlice(dest, src []*otlplogs.LogRecord) []*otlplogs.LogRecord {
- var newDest []*otlplogs.LogRecord
- if cap(dest) < len(src) {
- newDest = make([]*otlplogs.LogRecord, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLogRecord()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigLogRecord(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigLogRecord()
- }
- }
- for i := range src {
- CopyOrigLogRecord(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestLogRecordSlice() []*otlplogs.LogRecord {
- orig := make([]*otlplogs.LogRecord, 5)
- orig[0] = NewOrigLogRecord()
- orig[1] = GenTestOrigLogRecord()
- orig[2] = NewOrigLogRecord()
- orig[3] = GenTestOrigLogRecord()
- orig[4] = NewOrigLogRecord()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mappingslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mappingslice.go
deleted file mode 100644
index fa6e5bd41..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_mappingslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigMappingSlice(dest, src []*otlpprofiles.Mapping) []*otlpprofiles.Mapping {
- var newDest []*otlpprofiles.Mapping
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.Mapping, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigMapping()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigMapping(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigMapping()
- }
- }
- for i := range src {
- CopyOrigMapping(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestMappingSlice() []*otlpprofiles.Mapping {
- orig := make([]*otlpprofiles.Mapping, 5)
- orig[0] = NewOrigMapping()
- orig[1] = GenTestOrigMapping()
- orig[2] = NewOrigMapping()
- orig[3] = GenTestOrigMapping()
- orig[4] = NewOrigMapping()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metric.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metric.go
deleted file mode 100644
index 6dee89bdb..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metric.go
+++ /dev/null
@@ -1,635 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolMetric = sync.Pool{
- New: func() any {
- return &otlpmetrics.Metric{}
- },
- }
-
- ProtoPoolMetric_Gauge = sync.Pool{
- New: func() any {
- return &otlpmetrics.Metric_Gauge{}
- },
- }
-
- ProtoPoolMetric_Sum = sync.Pool{
- New: func() any {
- return &otlpmetrics.Metric_Sum{}
- },
- }
-
- ProtoPoolMetric_Histogram = sync.Pool{
- New: func() any {
- return &otlpmetrics.Metric_Histogram{}
- },
- }
-
- ProtoPoolMetric_ExponentialHistogram = sync.Pool{
- New: func() any {
- return &otlpmetrics.Metric_ExponentialHistogram{}
- },
- }
-
- ProtoPoolMetric_Summary = sync.Pool{
- New: func() any {
- return &otlpmetrics.Metric_Summary{}
- },
- }
-)
-
-func NewOrigMetric() *otlpmetrics.Metric {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.Metric{}
- }
- return protoPoolMetric.Get().(*otlpmetrics.Metric)
-}
-
-func DeleteOrigMetric(orig *otlpmetrics.Metric, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- switch ov := orig.Data.(type) {
- case *otlpmetrics.Metric_Gauge:
- DeleteOrigGauge(ov.Gauge, true)
- ov.Gauge = nil
- ProtoPoolMetric_Gauge.Put(ov)
- case *otlpmetrics.Metric_Sum:
- DeleteOrigSum(ov.Sum, true)
- ov.Sum = nil
- ProtoPoolMetric_Sum.Put(ov)
- case *otlpmetrics.Metric_Histogram:
- DeleteOrigHistogram(ov.Histogram, true)
- ov.Histogram = nil
- ProtoPoolMetric_Histogram.Put(ov)
- case *otlpmetrics.Metric_ExponentialHistogram:
- DeleteOrigExponentialHistogram(ov.ExponentialHistogram, true)
- ov.ExponentialHistogram = nil
- ProtoPoolMetric_ExponentialHistogram.Put(ov)
- case *otlpmetrics.Metric_Summary:
- DeleteOrigSummary(ov.Summary, true)
- ov.Summary = nil
- ProtoPoolMetric_Summary.Put(ov)
-
- }
- for i := range orig.Metadata {
- DeleteOrigKeyValue(&orig.Metadata[i], false)
- }
-
- orig.Reset()
- if nullable {
- protoPoolMetric.Put(orig)
- }
-}
-
-func CopyOrigMetric(dest, src *otlpmetrics.Metric) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Name = src.Name
- dest.Description = src.Description
- dest.Unit = src.Unit
- switch t := src.Data.(type) {
- case *otlpmetrics.Metric_Gauge:
- var ov *otlpmetrics.Metric_Gauge
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Gauge{}
- } else {
- ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge)
- }
- ov.Gauge = NewOrigGauge()
- CopyOrigGauge(ov.Gauge, t.Gauge)
- dest.Data = ov
- case *otlpmetrics.Metric_Sum:
- var ov *otlpmetrics.Metric_Sum
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Sum{}
- } else {
- ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum)
- }
- ov.Sum = NewOrigSum()
- CopyOrigSum(ov.Sum, t.Sum)
- dest.Data = ov
- case *otlpmetrics.Metric_Histogram:
- var ov *otlpmetrics.Metric_Histogram
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Histogram{}
- } else {
- ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram)
- }
- ov.Histogram = NewOrigHistogram()
- CopyOrigHistogram(ov.Histogram, t.Histogram)
- dest.Data = ov
- case *otlpmetrics.Metric_ExponentialHistogram:
- var ov *otlpmetrics.Metric_ExponentialHistogram
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_ExponentialHistogram{}
- } else {
- ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram)
- }
- ov.ExponentialHistogram = NewOrigExponentialHistogram()
- CopyOrigExponentialHistogram(ov.ExponentialHistogram, t.ExponentialHistogram)
- dest.Data = ov
- case *otlpmetrics.Metric_Summary:
- var ov *otlpmetrics.Metric_Summary
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Summary{}
- } else {
- ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary)
- }
- ov.Summary = NewOrigSummary()
- CopyOrigSummary(ov.Summary, t.Summary)
- dest.Data = ov
- }
- dest.Metadata = CopyOrigKeyValueSlice(dest.Metadata, src.Metadata)
-}
-
-func GenTestOrigMetric() *otlpmetrics.Metric {
- orig := NewOrigMetric()
- orig.Name = "test_name"
- orig.Description = "test_description"
- orig.Unit = "test_unit"
- orig.Data = &otlpmetrics.Metric_Sum{Sum: GenTestOrigSum()}
- orig.Metadata = GenerateOrigTestKeyValueSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigMetric(orig *otlpmetrics.Metric, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.Name != "" {
- dest.WriteObjectField("name")
- dest.WriteString(orig.Name)
- }
- if orig.Description != "" {
- dest.WriteObjectField("description")
- dest.WriteString(orig.Description)
- }
- if orig.Unit != "" {
- dest.WriteObjectField("unit")
- dest.WriteString(orig.Unit)
- }
- switch orig := orig.Data.(type) {
- case *otlpmetrics.Metric_Gauge:
- if orig.Gauge != nil {
- dest.WriteObjectField("gauge")
- MarshalJSONOrigGauge(orig.Gauge, dest)
- }
- case *otlpmetrics.Metric_Sum:
- if orig.Sum != nil {
- dest.WriteObjectField("sum")
- MarshalJSONOrigSum(orig.Sum, dest)
- }
- case *otlpmetrics.Metric_Histogram:
- if orig.Histogram != nil {
- dest.WriteObjectField("histogram")
- MarshalJSONOrigHistogram(orig.Histogram, dest)
- }
- case *otlpmetrics.Metric_ExponentialHistogram:
- if orig.ExponentialHistogram != nil {
- dest.WriteObjectField("exponentialHistogram")
- MarshalJSONOrigExponentialHistogram(orig.ExponentialHistogram, dest)
- }
- case *otlpmetrics.Metric_Summary:
- if orig.Summary != nil {
- dest.WriteObjectField("summary")
- MarshalJSONOrigSummary(orig.Summary, dest)
- }
- }
- if len(orig.Metadata) > 0 {
- dest.WriteObjectField("metadata")
- dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Metadata[0], dest)
- for i := 1; i < len(orig.Metadata); i++ {
- dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Metadata[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigMetric unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigMetric(orig *otlpmetrics.Metric, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "name":
- orig.Name = iter.ReadString()
- case "description":
- orig.Description = iter.ReadString()
- case "unit":
- orig.Unit = iter.ReadString()
-
- case "gauge":
- {
- var ov *otlpmetrics.Metric_Gauge
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Gauge{}
- } else {
- ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge)
- }
- ov.Gauge = NewOrigGauge()
- UnmarshalJSONOrigGauge(ov.Gauge, iter)
- orig.Data = ov
- }
-
- case "sum":
- {
- var ov *otlpmetrics.Metric_Sum
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Sum{}
- } else {
- ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum)
- }
- ov.Sum = NewOrigSum()
- UnmarshalJSONOrigSum(ov.Sum, iter)
- orig.Data = ov
- }
-
- case "histogram":
- {
- var ov *otlpmetrics.Metric_Histogram
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Histogram{}
- } else {
- ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram)
- }
- ov.Histogram = NewOrigHistogram()
- UnmarshalJSONOrigHistogram(ov.Histogram, iter)
- orig.Data = ov
- }
-
- case "exponentialHistogram", "exponential_histogram":
- {
- var ov *otlpmetrics.Metric_ExponentialHistogram
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_ExponentialHistogram{}
- } else {
- ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram)
- }
- ov.ExponentialHistogram = NewOrigExponentialHistogram()
- UnmarshalJSONOrigExponentialHistogram(ov.ExponentialHistogram, iter)
- orig.Data = ov
- }
-
- case "summary":
- {
- var ov *otlpmetrics.Metric_Summary
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Summary{}
- } else {
- ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary)
- }
- ov.Summary = NewOrigSummary()
- UnmarshalJSONOrigSummary(ov.Summary, iter)
- orig.Data = ov
- }
-
- case "metadata":
- for iter.ReadArray() {
- orig.Metadata = append(orig.Metadata, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Metadata[len(orig.Metadata)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigMetric(orig *otlpmetrics.Metric) int {
- var n int
- var l int
- _ = l
- l = len(orig.Name)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.Description)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.Unit)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- switch orig := orig.Data.(type) {
- case nil:
- _ = orig
- break
- case *otlpmetrics.Metric_Gauge:
- l = SizeProtoOrigGauge(orig.Gauge)
- n += 1 + proto.Sov(uint64(l)) + l
- case *otlpmetrics.Metric_Sum:
- l = SizeProtoOrigSum(orig.Sum)
- n += 1 + proto.Sov(uint64(l)) + l
- case *otlpmetrics.Metric_Histogram:
- l = SizeProtoOrigHistogram(orig.Histogram)
- n += 1 + proto.Sov(uint64(l)) + l
- case *otlpmetrics.Metric_ExponentialHistogram:
- l = SizeProtoOrigExponentialHistogram(orig.ExponentialHistogram)
- n += 1 + proto.Sov(uint64(l)) + l
- case *otlpmetrics.Metric_Summary:
- l = SizeProtoOrigSummary(orig.Summary)
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.Metadata {
- l = SizeProtoOrigKeyValue(&orig.Metadata[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigMetric(orig *otlpmetrics.Metric, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- l = len(orig.Name)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Name)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- l = len(orig.Description)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Description)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.Unit)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Unit)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- switch orig := orig.Data.(type) {
- case *otlpmetrics.Metric_Gauge:
-
- l = MarshalProtoOrigGauge(orig.Gauge, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x2a
-
- case *otlpmetrics.Metric_Sum:
-
- l = MarshalProtoOrigSum(orig.Sum, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x3a
-
- case *otlpmetrics.Metric_Histogram:
-
- l = MarshalProtoOrigHistogram(orig.Histogram, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x4a
-
- case *otlpmetrics.Metric_ExponentialHistogram:
-
- l = MarshalProtoOrigExponentialHistogram(orig.ExponentialHistogram, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x52
-
- case *otlpmetrics.Metric_Summary:
-
- l = MarshalProtoOrigSummary(orig.Summary, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x5a
-
- }
- for i := len(orig.Metadata) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Metadata[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x62
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigMetric(orig *otlpmetrics.Metric, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Name = string(buf[startPos:pos])
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Description = string(buf[startPos:pos])
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Unit = string(buf[startPos:pos])
-
- case 5:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpmetrics.Metric_Gauge
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Gauge{}
- } else {
- ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge)
- }
- ov.Gauge = NewOrigGauge()
- err = UnmarshalProtoOrigGauge(ov.Gauge, buf[startPos:pos])
- if err != nil {
- return err
- }
- orig.Data = ov
-
- case 7:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpmetrics.Metric_Sum
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Sum{}
- } else {
- ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum)
- }
- ov.Sum = NewOrigSum()
- err = UnmarshalProtoOrigSum(ov.Sum, buf[startPos:pos])
- if err != nil {
- return err
- }
- orig.Data = ov
-
- case 9:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpmetrics.Metric_Histogram
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Histogram{}
- } else {
- ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram)
- }
- ov.Histogram = NewOrigHistogram()
- err = UnmarshalProtoOrigHistogram(ov.Histogram, buf[startPos:pos])
- if err != nil {
- return err
- }
- orig.Data = ov
-
- case 10:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpmetrics.Metric_ExponentialHistogram
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_ExponentialHistogram{}
- } else {
- ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram)
- }
- ov.ExponentialHistogram = NewOrigExponentialHistogram()
- err = UnmarshalProtoOrigExponentialHistogram(ov.ExponentialHistogram, buf[startPos:pos])
- if err != nil {
- return err
- }
- orig.Data = ov
-
- case 11:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var ov *otlpmetrics.Metric_Summary
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpmetrics.Metric_Summary{}
- } else {
- ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary)
- }
- ov.Summary = NewOrigSummary()
- err = UnmarshalProtoOrigSummary(ov.Summary, buf[startPos:pos])
- if err != nil {
- return err
- }
- orig.Data = ov
-
- case 12:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Metadata = append(orig.Metadata, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Metadata[len(orig.Metadata)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metricslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metricslice.go
deleted file mode 100644
index b6b82452d..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_metricslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigMetricSlice(dest, src []*otlpmetrics.Metric) []*otlpmetrics.Metric {
- var newDest []*otlpmetrics.Metric
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.Metric, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigMetric()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigMetric(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigMetric()
- }
- }
- for i := range src {
- CopyOrigMetric(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestMetricSlice() []*otlpmetrics.Metric {
- orig := make([]*otlpmetrics.Metric, 5)
- orig[0] = NewOrigMetric()
- orig[1] = GenTestOrigMetric()
- orig[2] = NewOrigMetric()
- orig[3] = GenTestOrigMetric()
- orig[4] = NewOrigMetric()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapointslice.go
deleted file mode 100644
index 3ac7b17ab..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_numberdatapointslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigNumberDataPointSlice(dest, src []*otlpmetrics.NumberDataPoint) []*otlpmetrics.NumberDataPoint {
- var newDest []*otlpmetrics.NumberDataPoint
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.NumberDataPoint, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigNumberDataPoint()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigNumberDataPoint(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigNumberDataPoint()
- }
- }
- for i := range src {
- CopyOrigNumberDataPoint(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestNumberDataPointSlice() []*otlpmetrics.NumberDataPoint {
- orig := make([]*otlpmetrics.NumberDataPoint, 5)
- orig[0] = NewOrigNumberDataPoint()
- orig[1] = GenTestOrigNumberDataPoint()
- orig[2] = NewOrigNumberDataPoint()
- orig[3] = GenTestOrigNumberDataPoint()
- orig[4] = NewOrigNumberDataPoint()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profile.go
deleted file mode 100644
index 2ede2e3f4..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profile.go
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolProfile = sync.Pool{
- New: func() any {
- return &otlpprofiles.Profile{}
- },
- }
-)
-
-func NewOrigProfile() *otlpprofiles.Profile {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.Profile{}
- }
- return protoPoolProfile.Get().(*otlpprofiles.Profile)
-}
-
-func DeleteOrigProfile(orig *otlpprofiles.Profile, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.SampleType {
- DeleteOrigValueType(orig.SampleType[i], true)
- }
- for i := range orig.Sample {
- DeleteOrigSample(orig.Sample[i], true)
- }
- DeleteOrigValueType(&orig.PeriodType, false)
- DeleteOrigProfileID(&orig.ProfileId, false)
-
- orig.Reset()
- if nullable {
- protoPoolProfile.Put(orig)
- }
-}
-
-func CopyOrigProfile(dest, src *otlpprofiles.Profile) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.SampleType = CopyOrigValueTypeSlice(dest.SampleType, src.SampleType)
- dest.Sample = CopyOrigSampleSlice(dest.Sample, src.Sample)
- dest.LocationIndices = CopyOrigInt32Slice(dest.LocationIndices, src.LocationIndices)
- dest.TimeNanos = src.TimeNanos
- dest.DurationNanos = src.DurationNanos
- CopyOrigValueType(&dest.PeriodType, &src.PeriodType)
- dest.Period = src.Period
- dest.CommentStrindices = CopyOrigInt32Slice(dest.CommentStrindices, src.CommentStrindices)
- dest.DefaultSampleTypeIndex = src.DefaultSampleTypeIndex
- dest.ProfileId = src.ProfileId
- dest.DroppedAttributesCount = src.DroppedAttributesCount
- dest.OriginalPayloadFormat = src.OriginalPayloadFormat
- dest.OriginalPayload = CopyOrigByteSlice(dest.OriginalPayload, src.OriginalPayload)
- dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices)
-}
-
-func GenTestOrigProfile() *otlpprofiles.Profile {
- orig := NewOrigProfile()
- orig.SampleType = GenerateOrigTestValueTypeSlice()
- orig.Sample = GenerateOrigTestSampleSlice()
- orig.LocationIndices = GenerateOrigTestInt32Slice()
- orig.TimeNanos = 1234567890
- orig.DurationNanos = 1234567890
- orig.PeriodType = *GenTestOrigValueType()
- orig.Period = int64(13)
- orig.CommentStrindices = GenerateOrigTestInt32Slice()
- orig.DefaultSampleTypeIndex = int32(13)
- orig.ProfileId = data.ProfileID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
- orig.DroppedAttributesCount = uint32(13)
- orig.OriginalPayloadFormat = "test_originalpayloadformat"
- orig.OriginalPayload = GenerateOrigTestByteSlice()
- orig.AttributeIndices = GenerateOrigTestInt32Slice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigProfile(orig *otlpprofiles.Profile, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.SampleType) > 0 {
- dest.WriteObjectField("sampleType")
- dest.WriteArrayStart()
- MarshalJSONOrigValueType(orig.SampleType[0], dest)
- for i := 1; i < len(orig.SampleType); i++ {
- dest.WriteMore()
- MarshalJSONOrigValueType(orig.SampleType[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if len(orig.Sample) > 0 {
- dest.WriteObjectField("sample")
- dest.WriteArrayStart()
- MarshalJSONOrigSample(orig.Sample[0], dest)
- for i := 1; i < len(orig.Sample); i++ {
- dest.WriteMore()
- MarshalJSONOrigSample(orig.Sample[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if len(orig.LocationIndices) > 0 {
- dest.WriteObjectField("locationIndices")
- dest.WriteArrayStart()
- dest.WriteInt32(orig.LocationIndices[0])
- for i := 1; i < len(orig.LocationIndices); i++ {
- dest.WriteMore()
- dest.WriteInt32(orig.LocationIndices[i])
- }
- dest.WriteArrayEnd()
- }
- if orig.TimeNanos != int64(0) {
- dest.WriteObjectField("timeNanos")
- dest.WriteInt64(orig.TimeNanos)
- }
- if orig.DurationNanos != int64(0) {
- dest.WriteObjectField("durationNanos")
- dest.WriteInt64(orig.DurationNanos)
- }
- dest.WriteObjectField("periodType")
- MarshalJSONOrigValueType(&orig.PeriodType, dest)
- if orig.Period != int64(0) {
- dest.WriteObjectField("period")
- dest.WriteInt64(orig.Period)
- }
- if len(orig.CommentStrindices) > 0 {
- dest.WriteObjectField("commentStrindices")
- dest.WriteArrayStart()
- dest.WriteInt32(orig.CommentStrindices[0])
- for i := 1; i < len(orig.CommentStrindices); i++ {
- dest.WriteMore()
- dest.WriteInt32(orig.CommentStrindices[i])
- }
- dest.WriteArrayEnd()
- }
- if orig.DefaultSampleTypeIndex != int32(0) {
- dest.WriteObjectField("defaultSampleTypeIndex")
- dest.WriteInt32(orig.DefaultSampleTypeIndex)
- }
- if orig.ProfileId != data.ProfileID([16]byte{}) {
- dest.WriteObjectField("profileId")
- MarshalJSONOrigProfileID(&orig.ProfileId, dest)
- }
- if orig.DroppedAttributesCount != uint32(0) {
- dest.WriteObjectField("droppedAttributesCount")
- dest.WriteUint32(orig.DroppedAttributesCount)
- }
- if orig.OriginalPayloadFormat != "" {
- dest.WriteObjectField("originalPayloadFormat")
- dest.WriteString(orig.OriginalPayloadFormat)
- }
-
- if len(orig.OriginalPayload) > 0 {
- dest.WriteObjectField("originalPayload")
- dest.WriteBytes(orig.OriginalPayload)
- }
- if len(orig.AttributeIndices) > 0 {
- dest.WriteObjectField("attributeIndices")
- dest.WriteArrayStart()
- dest.WriteInt32(orig.AttributeIndices[0])
- for i := 1; i < len(orig.AttributeIndices); i++ {
- dest.WriteMore()
- dest.WriteInt32(orig.AttributeIndices[i])
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigProfile unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigProfile(orig *otlpprofiles.Profile, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "sampleType", "sample_type":
- for iter.ReadArray() {
- orig.SampleType = append(orig.SampleType, NewOrigValueType())
- UnmarshalJSONOrigValueType(orig.SampleType[len(orig.SampleType)-1], iter)
- }
-
- case "sample":
- for iter.ReadArray() {
- orig.Sample = append(orig.Sample, NewOrigSample())
- UnmarshalJSONOrigSample(orig.Sample[len(orig.Sample)-1], iter)
- }
-
- case "locationIndices", "location_indices":
- for iter.ReadArray() {
- orig.LocationIndices = append(orig.LocationIndices, iter.ReadInt32())
- }
-
- case "timeNanos", "time_nanos":
- orig.TimeNanos = iter.ReadInt64()
- case "durationNanos", "duration_nanos":
- orig.DurationNanos = iter.ReadInt64()
- case "periodType", "period_type":
- UnmarshalJSONOrigValueType(&orig.PeriodType, iter)
- case "period":
- orig.Period = iter.ReadInt64()
- case "commentStrindices", "comment_strindices":
- for iter.ReadArray() {
- orig.CommentStrindices = append(orig.CommentStrindices, iter.ReadInt32())
- }
-
- case "defaultSampleTypeIndex", "default_sample_type_index":
- orig.DefaultSampleTypeIndex = iter.ReadInt32()
- case "profileId", "profile_id":
- UnmarshalJSONOrigProfileID(&orig.ProfileId, iter)
- case "droppedAttributesCount", "dropped_attributes_count":
- orig.DroppedAttributesCount = iter.ReadUint32()
- case "originalPayloadFormat", "original_payload_format":
- orig.OriginalPayloadFormat = iter.ReadString()
- case "originalPayload", "original_payload":
- orig.OriginalPayload = iter.ReadBytes()
- case "attributeIndices", "attribute_indices":
- for iter.ReadArray() {
- orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigProfile(orig *otlpprofiles.Profile) int {
- var n int
- var l int
- _ = l
- for i := range orig.SampleType {
- l = SizeProtoOrigValueType(orig.SampleType[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.Sample {
- l = SizeProtoOrigSample(orig.Sample[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if len(orig.LocationIndices) > 0 {
- l = 0
- for _, e := range orig.LocationIndices {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.TimeNanos != 0 {
- n += 1 + proto.Sov(uint64(orig.TimeNanos))
- }
- if orig.DurationNanos != 0 {
- n += 1 + proto.Sov(uint64(orig.DurationNanos))
- }
- l = SizeProtoOrigValueType(&orig.PeriodType)
- n += 1 + proto.Sov(uint64(l)) + l
- if orig.Period != 0 {
- n += 1 + proto.Sov(uint64(orig.Period))
- }
- if len(orig.CommentStrindices) > 0 {
- l = 0
- for _, e := range orig.CommentStrindices {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.DefaultSampleTypeIndex != 0 {
- n += 1 + proto.Sov(uint64(orig.DefaultSampleTypeIndex))
- }
- l = SizeProtoOrigProfileID(&orig.ProfileId)
- n += 1 + proto.Sov(uint64(l)) + l
- if orig.DroppedAttributesCount != 0 {
- n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
- }
- l = len(orig.OriginalPayloadFormat)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.OriginalPayload)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if len(orig.AttributeIndices) > 0 {
- l = 0
- for _, e := range orig.AttributeIndices {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.SampleType) - 1; i >= 0; i-- {
- l = MarshalProtoOrigValueType(orig.SampleType[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- for i := len(orig.Sample) - 1; i >= 0; i-- {
- l = MarshalProtoOrigSample(orig.Sample[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.LocationIndices)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.LocationIndices[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x1a
- }
- if orig.TimeNanos != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.TimeNanos))
- pos--
- buf[pos] = 0x20
- }
- if orig.DurationNanos != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.DurationNanos))
- pos--
- buf[pos] = 0x28
- }
-
- l = MarshalProtoOrigValueType(&orig.PeriodType, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x32
-
- if orig.Period != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.Period))
- pos--
- buf[pos] = 0x38
- }
- l = len(orig.CommentStrindices)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.CommentStrindices[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x42
- }
- if orig.DefaultSampleTypeIndex != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.DefaultSampleTypeIndex))
- pos--
- buf[pos] = 0x48
- }
-
- l = MarshalProtoOrigProfileID(&orig.ProfileId, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x52
-
- if orig.DroppedAttributesCount != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
- pos--
- buf[pos] = 0x58
- }
- l = len(orig.OriginalPayloadFormat)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.OriginalPayloadFormat)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x62
- }
- l = len(orig.OriginalPayload)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.OriginalPayload)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x6a
- }
- l = len(orig.AttributeIndices)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x72
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigProfile(orig *otlpprofiles.Profile, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SampleType = append(orig.SampleType, NewOrigValueType())
- err = UnmarshalProtoOrigValueType(orig.SampleType[len(orig.SampleType)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Sample = append(orig.Sample, NewOrigSample())
- err = UnmarshalProtoOrigSample(orig.Sample[len(orig.Sample)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.LocationIndices = append(orig.LocationIndices, int32(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field LocationIndices", pos-startPos)
- }
-
- case 4:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.TimeNanos = int64(num)
-
- case 5:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.DurationNanos = int64(num)
-
- case 6:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigValueType(&orig.PeriodType, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 7:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.Period = int64(num)
- case 8:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.CommentStrindices = append(orig.CommentStrindices, int32(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field CommentStrindices", pos-startPos)
- }
-
- case 9:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleTypeIndex", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.DefaultSampleTypeIndex = int32(num)
-
- case 10:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigProfileID(&orig.ProfileId, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 11:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.DroppedAttributesCount = uint32(num)
-
- case 12:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.OriginalPayloadFormat = string(buf[startPos:pos])
-
- case 13:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- if length != 0 {
- orig.OriginalPayload = make([]byte, length)
- copy(orig.OriginalPayload, buf[startPos:pos])
- }
- case 14:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go
new file mode 100644
index 000000000..7bece6d02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type ProfilesDataWrapper struct {
+ orig *ProfilesData
+ state *State
+}
+
+func GetProfilesDataOrig(ms ProfilesDataWrapper) *ProfilesData {
+ return ms.orig
+}
+
+func GetProfilesDataState(ms ProfilesDataWrapper) *State {
+ return ms.state
+}
+
+func NewProfilesDataWrapper(orig *ProfilesData, state *State) ProfilesDataWrapper {
+ return ProfilesDataWrapper{orig: orig, state: state}
+}
+
+func GenTestProfilesDataWrapper() ProfilesDataWrapper {
+ return NewProfilesDataWrapper(GenTestProfilesData(), NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdictionary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdictionary.go
deleted file mode 100644
index 5c14475bc..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdictionary.go
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolProfilesDictionary = sync.Pool{
- New: func() any {
- return &otlpprofiles.ProfilesDictionary{}
- },
- }
-)
-
-func NewOrigProfilesDictionary() *otlpprofiles.ProfilesDictionary {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.ProfilesDictionary{}
- }
- return protoPoolProfilesDictionary.Get().(*otlpprofiles.ProfilesDictionary)
-}
-
-func DeleteOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.MappingTable {
- DeleteOrigMapping(orig.MappingTable[i], true)
- }
- for i := range orig.LocationTable {
- DeleteOrigLocation(orig.LocationTable[i], true)
- }
- for i := range orig.FunctionTable {
- DeleteOrigFunction(orig.FunctionTable[i], true)
- }
- for i := range orig.LinkTable {
- DeleteOrigLink(orig.LinkTable[i], true)
- }
- for i := range orig.AttributeTable {
- DeleteOrigKeyValue(&orig.AttributeTable[i], false)
- }
- for i := range orig.AttributeUnits {
- DeleteOrigAttributeUnit(orig.AttributeUnits[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolProfilesDictionary.Put(orig)
- }
-}
-
-func CopyOrigProfilesDictionary(dest, src *otlpprofiles.ProfilesDictionary) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.MappingTable = CopyOrigMappingSlice(dest.MappingTable, src.MappingTable)
- dest.LocationTable = CopyOrigLocationSlice(dest.LocationTable, src.LocationTable)
- dest.FunctionTable = CopyOrigFunctionSlice(dest.FunctionTable, src.FunctionTable)
- dest.LinkTable = CopyOrigLinkSlice(dest.LinkTable, src.LinkTable)
- dest.StringTable = CopyOrigStringSlice(dest.StringTable, src.StringTable)
- dest.AttributeTable = CopyOrigKeyValueSlice(dest.AttributeTable, src.AttributeTable)
- dest.AttributeUnits = CopyOrigAttributeUnitSlice(dest.AttributeUnits, src.AttributeUnits)
-}
-
-func GenTestOrigProfilesDictionary() *otlpprofiles.ProfilesDictionary {
- orig := NewOrigProfilesDictionary()
- orig.MappingTable = GenerateOrigTestMappingSlice()
- orig.LocationTable = GenerateOrigTestLocationSlice()
- orig.FunctionTable = GenerateOrigTestFunctionSlice()
- orig.LinkTable = GenerateOrigTestLinkSlice()
- orig.StringTable = GenerateOrigTestStringSlice()
- orig.AttributeTable = GenerateOrigTestKeyValueSlice()
- orig.AttributeUnits = GenerateOrigTestAttributeUnitSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.MappingTable) > 0 {
- dest.WriteObjectField("mappingTable")
- dest.WriteArrayStart()
- MarshalJSONOrigMapping(orig.MappingTable[0], dest)
- for i := 1; i < len(orig.MappingTable); i++ {
- dest.WriteMore()
- MarshalJSONOrigMapping(orig.MappingTable[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if len(orig.LocationTable) > 0 {
- dest.WriteObjectField("locationTable")
- dest.WriteArrayStart()
- MarshalJSONOrigLocation(orig.LocationTable[0], dest)
- for i := 1; i < len(orig.LocationTable); i++ {
- dest.WriteMore()
- MarshalJSONOrigLocation(orig.LocationTable[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if len(orig.FunctionTable) > 0 {
- dest.WriteObjectField("functionTable")
- dest.WriteArrayStart()
- MarshalJSONOrigFunction(orig.FunctionTable[0], dest)
- for i := 1; i < len(orig.FunctionTable); i++ {
- dest.WriteMore()
- MarshalJSONOrigFunction(orig.FunctionTable[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if len(orig.LinkTable) > 0 {
- dest.WriteObjectField("linkTable")
- dest.WriteArrayStart()
- MarshalJSONOrigLink(orig.LinkTable[0], dest)
- for i := 1; i < len(orig.LinkTable); i++ {
- dest.WriteMore()
- MarshalJSONOrigLink(orig.LinkTable[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if len(orig.StringTable) > 0 {
- dest.WriteObjectField("stringTable")
- dest.WriteArrayStart()
- dest.WriteString(orig.StringTable[0])
- for i := 1; i < len(orig.StringTable); i++ {
- dest.WriteMore()
- dest.WriteString(orig.StringTable[i])
- }
- dest.WriteArrayEnd()
- }
- if len(orig.AttributeTable) > 0 {
- dest.WriteObjectField("attributeTable")
- dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.AttributeTable[0], dest)
- for i := 1; i < len(orig.AttributeTable); i++ {
- dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.AttributeTable[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if len(orig.AttributeUnits) > 0 {
- dest.WriteObjectField("attributeUnits")
- dest.WriteArrayStart()
- MarshalJSONOrigAttributeUnit(orig.AttributeUnits[0], dest)
- for i := 1; i < len(orig.AttributeUnits); i++ {
- dest.WriteMore()
- MarshalJSONOrigAttributeUnit(orig.AttributeUnits[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigProfilesDictionary unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "mappingTable", "mapping_table":
- for iter.ReadArray() {
- orig.MappingTable = append(orig.MappingTable, NewOrigMapping())
- UnmarshalJSONOrigMapping(orig.MappingTable[len(orig.MappingTable)-1], iter)
- }
-
- case "locationTable", "location_table":
- for iter.ReadArray() {
- orig.LocationTable = append(orig.LocationTable, NewOrigLocation())
- UnmarshalJSONOrigLocation(orig.LocationTable[len(orig.LocationTable)-1], iter)
- }
-
- case "functionTable", "function_table":
- for iter.ReadArray() {
- orig.FunctionTable = append(orig.FunctionTable, NewOrigFunction())
- UnmarshalJSONOrigFunction(orig.FunctionTable[len(orig.FunctionTable)-1], iter)
- }
-
- case "linkTable", "link_table":
- for iter.ReadArray() {
- orig.LinkTable = append(orig.LinkTable, NewOrigLink())
- UnmarshalJSONOrigLink(orig.LinkTable[len(orig.LinkTable)-1], iter)
- }
-
- case "stringTable", "string_table":
- for iter.ReadArray() {
- orig.StringTable = append(orig.StringTable, iter.ReadString())
- }
-
- case "attributeTable", "attribute_table":
- for iter.ReadArray() {
- orig.AttributeTable = append(orig.AttributeTable, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.AttributeTable[len(orig.AttributeTable)-1], iter)
- }
-
- case "attributeUnits", "attribute_units":
- for iter.ReadArray() {
- orig.AttributeUnits = append(orig.AttributeUnits, NewOrigAttributeUnit())
- UnmarshalJSONOrigAttributeUnit(orig.AttributeUnits[len(orig.AttributeUnits)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary) int {
- var n int
- var l int
- _ = l
- for i := range orig.MappingTable {
- l = SizeProtoOrigMapping(orig.MappingTable[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.LocationTable {
- l = SizeProtoOrigLocation(orig.LocationTable[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.FunctionTable {
- l = SizeProtoOrigFunction(orig.FunctionTable[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.LinkTable {
- l = SizeProtoOrigLink(orig.LinkTable[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for _, s := range orig.StringTable {
- l = len(s)
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.AttributeTable {
- l = SizeProtoOrigKeyValue(&orig.AttributeTable[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- for i := range orig.AttributeUnits {
- l = SizeProtoOrigAttributeUnit(orig.AttributeUnits[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.MappingTable) - 1; i >= 0; i-- {
- l = MarshalProtoOrigMapping(orig.MappingTable[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- for i := len(orig.LocationTable) - 1; i >= 0; i-- {
- l = MarshalProtoOrigLocation(orig.LocationTable[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- for i := len(orig.FunctionTable) - 1; i >= 0; i-- {
- l = MarshalProtoOrigFunction(orig.FunctionTable[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- for i := len(orig.LinkTable) - 1; i >= 0; i-- {
- l = MarshalProtoOrigLink(orig.LinkTable[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x22
- }
- for i := len(orig.StringTable) - 1; i >= 0; i-- {
- l = len(orig.StringTable[i])
- pos -= l
- copy(buf[pos:], orig.StringTable[i])
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x2a
- }
- for i := len(orig.AttributeTable) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.AttributeTable[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x32
- }
- for i := len(orig.AttributeUnits) - 1; i >= 0; i-- {
- l = MarshalProtoOrigAttributeUnit(orig.AttributeUnits[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x3a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.MappingTable = append(orig.MappingTable, NewOrigMapping())
- err = UnmarshalProtoOrigMapping(orig.MappingTable[len(orig.MappingTable)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.LocationTable = append(orig.LocationTable, NewOrigLocation())
- err = UnmarshalProtoOrigLocation(orig.LocationTable[len(orig.LocationTable)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.FunctionTable = append(orig.FunctionTable, NewOrigFunction())
- err = UnmarshalProtoOrigFunction(orig.FunctionTable[len(orig.FunctionTable)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 4:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.LinkTable = append(orig.LinkTable, NewOrigLink())
- err = UnmarshalProtoOrigLink(orig.LinkTable[len(orig.LinkTable)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 5:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.StringTable = append(orig.StringTable, string(buf[startPos:pos]))
-
- case 6:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.AttributeTable = append(orig.AttributeTable, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.AttributeTable[len(orig.AttributeTable)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 7:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.AttributeUnits = append(orig.AttributeUnits, NewOrigAttributeUnit())
- err = UnmarshalProtoOrigAttributeUnit(orig.AttributeUnits[len(orig.AttributeUnits)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profileslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profileslice.go
deleted file mode 100644
index 84ec00515..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profileslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigProfileSlice(dest, src []*otlpprofiles.Profile) []*otlpprofiles.Profile {
- var newDest []*otlpprofiles.Profile
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.Profile, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigProfile()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigProfile(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigProfile()
- }
- }
- for i := range src {
- CopyOrigProfile(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestProfileSlice() []*otlpprofiles.Profile {
- orig := make([]*otlpprofiles.Profile, 5)
- orig[0] = NewOrigProfile()
- orig[1] = GenTestOrigProfile()
- orig[2] = NewOrigProfile()
- orig[3] = GenTestOrigProfile()
- orig[4] = NewOrigProfile()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go
index 6e45dbdec..1d6cabfeb 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go
@@ -6,251 +6,23 @@
package internal
-import (
- "fmt"
- "sync"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-type Resource struct {
- orig *otlpresource.Resource
+type ResourceWrapper struct {
+ orig *Resource
state *State
}
-func GetOrigResource(ms Resource) *otlpresource.Resource {
+func GetResourceOrig(ms ResourceWrapper) *Resource {
return ms.orig
}
-func GetResourceState(ms Resource) *State {
+func GetResourceState(ms ResourceWrapper) *State {
return ms.state
}
-func NewResource(orig *otlpresource.Resource, state *State) Resource {
- return Resource{orig: orig, state: state}
-}
-
-var (
- protoPoolResource = sync.Pool{
- New: func() any {
- return &otlpresource.Resource{}
- },
- }
-)
-
-func NewOrigResource() *otlpresource.Resource {
- if !UseProtoPooling.IsEnabled() {
- return &otlpresource.Resource{}
- }
- return protoPoolResource.Get().(*otlpresource.Resource)
+func NewResourceWrapper(orig *Resource, state *State) ResourceWrapper {
+ return ResourceWrapper{orig: orig, state: state}
}
-func DeleteOrigResource(orig *otlpresource.Resource, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.Attributes {
- DeleteOrigKeyValue(&orig.Attributes[i], false)
- }
- for i := range orig.EntityRefs {
- DeleteOrigEntityRef(orig.EntityRefs[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolResource.Put(orig)
- }
-}
-
-func CopyOrigResource(dest, src *otlpresource.Resource) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Attributes = CopyOrigKeyValueSlice(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
- dest.EntityRefs = CopyOrigEntityRefSlice(dest.EntityRefs, src.EntityRefs)
-}
-
-func GenTestOrigResource() *otlpresource.Resource {
- orig := NewOrigResource()
- orig.Attributes = GenerateOrigTestKeyValueSlice()
- orig.DroppedAttributesCount = uint32(13)
- orig.EntityRefs = GenerateOrigTestEntityRefSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigResource(orig *otlpresource.Resource, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.Attributes) > 0 {
- dest.WriteObjectField("attributes")
- dest.WriteArrayStart()
- MarshalJSONOrigKeyValue(&orig.Attributes[0], dest)
- for i := 1; i < len(orig.Attributes); i++ {
- dest.WriteMore()
- MarshalJSONOrigKeyValue(&orig.Attributes[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.DroppedAttributesCount != uint32(0) {
- dest.WriteObjectField("droppedAttributesCount")
- dest.WriteUint32(orig.DroppedAttributesCount)
- }
- if len(orig.EntityRefs) > 0 {
- dest.WriteObjectField("entityRefs")
- dest.WriteArrayStart()
- MarshalJSONOrigEntityRef(orig.EntityRefs[0], dest)
- for i := 1; i < len(orig.EntityRefs); i++ {
- dest.WriteMore()
- MarshalJSONOrigEntityRef(orig.EntityRefs[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigResource unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigResource(orig *otlpresource.Resource, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "attributes":
- for iter.ReadArray() {
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- UnmarshalJSONOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
- }
-
- case "droppedAttributesCount", "dropped_attributes_count":
- orig.DroppedAttributesCount = iter.ReadUint32()
- case "entityRefs", "entity_refs":
- for iter.ReadArray() {
- orig.EntityRefs = append(orig.EntityRefs, NewOrigEntityRef())
- UnmarshalJSONOrigEntityRef(orig.EntityRefs[len(orig.EntityRefs)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigResource(orig *otlpresource.Resource) int {
- var n int
- var l int
- _ = l
- for i := range orig.Attributes {
- l = SizeProtoOrigKeyValue(&orig.Attributes[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.DroppedAttributesCount != 0 {
- n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
- }
- for i := range orig.EntityRefs {
- l = SizeProtoOrigEntityRef(orig.EntityRefs[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigResource(orig *otlpresource.Resource, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.Attributes) - 1; i >= 0; i-- {
- l = MarshalProtoOrigKeyValue(&orig.Attributes[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- if orig.DroppedAttributesCount != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
- pos--
- buf[pos] = 0x10
- }
- for i := len(orig.EntityRefs) - 1; i >= 0; i-- {
- l = MarshalProtoOrigEntityRef(orig.EntityRefs[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigResource(orig *otlpresource.Resource, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
- err = UnmarshalProtoOrigKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.DroppedAttributesCount = uint32(num)
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.EntityRefs = append(orig.EntityRefs, NewOrigEntityRef())
- err = UnmarshalProtoOrigEntityRef(orig.EntityRefs[len(orig.EntityRefs)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
+func GenTestResourceWrapper() ResourceWrapper {
+ return NewResourceWrapper(GenTestResource(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogs.go
deleted file mode 100644
index 4c78e1246..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogs.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolResourceLogs = sync.Pool{
- New: func() any {
- return &otlplogs.ResourceLogs{}
- },
- }
-)
-
-func NewOrigResourceLogs() *otlplogs.ResourceLogs {
- if !UseProtoPooling.IsEnabled() {
- return &otlplogs.ResourceLogs{}
- }
- return protoPoolResourceLogs.Get().(*otlplogs.ResourceLogs)
-}
-
-func DeleteOrigResourceLogs(orig *otlplogs.ResourceLogs, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigResource(&orig.Resource, false)
- for i := range orig.ScopeLogs {
- DeleteOrigScopeLogs(orig.ScopeLogs[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolResourceLogs.Put(orig)
- }
-}
-
-func CopyOrigResourceLogs(dest, src *otlplogs.ResourceLogs) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigResource(&dest.Resource, &src.Resource)
- dest.ScopeLogs = CopyOrigScopeLogsSlice(dest.ScopeLogs, src.ScopeLogs)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigResourceLogs() *otlplogs.ResourceLogs {
- orig := NewOrigResourceLogs()
- orig.Resource = *GenTestOrigResource()
- orig.ScopeLogs = GenerateOrigTestScopeLogsSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigResourceLogs(orig *otlplogs.ResourceLogs, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("resource")
- MarshalJSONOrigResource(&orig.Resource, dest)
- if len(orig.ScopeLogs) > 0 {
- dest.WriteObjectField("scopeLogs")
- dest.WriteArrayStart()
- MarshalJSONOrigScopeLogs(orig.ScopeLogs[0], dest)
- for i := 1; i < len(orig.ScopeLogs); i++ {
- dest.WriteMore()
- MarshalJSONOrigScopeLogs(orig.ScopeLogs[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigResourceLogs unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigResourceLogs(orig *otlplogs.ResourceLogs, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resource":
- UnmarshalJSONOrigResource(&orig.Resource, iter)
- case "scopeLogs", "scope_logs":
- for iter.ReadArray() {
- orig.ScopeLogs = append(orig.ScopeLogs, NewOrigScopeLogs())
- UnmarshalJSONOrigScopeLogs(orig.ScopeLogs[len(orig.ScopeLogs)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigResourceLogs(orig *otlplogs.ResourceLogs) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigResource(&orig.Resource)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.ScopeLogs {
- l = SizeProtoOrigScopeLogs(orig.ScopeLogs[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigResourceLogs(orig *otlplogs.ResourceLogs, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigResource(&orig.Resource, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.ScopeLogs) - 1; i >= 0; i-- {
- l = MarshalProtoOrigScopeLogs(orig.ScopeLogs[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigResourceLogs(orig *otlplogs.ResourceLogs, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ScopeLogs = append(orig.ScopeLogs, NewOrigScopeLogs())
- err = UnmarshalProtoOrigScopeLogs(orig.ScopeLogs[len(orig.ScopeLogs)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogsslice.go
deleted file mode 100644
index dcf0a0afb..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcelogsslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
-func CopyOrigResourceLogsSlice(dest, src []*otlplogs.ResourceLogs) []*otlplogs.ResourceLogs {
- var newDest []*otlplogs.ResourceLogs
- if cap(dest) < len(src) {
- newDest = make([]*otlplogs.ResourceLogs, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceLogs()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigResourceLogs(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceLogs()
- }
- }
- for i := range src {
- CopyOrigResourceLogs(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestResourceLogsSlice() []*otlplogs.ResourceLogs {
- orig := make([]*otlplogs.ResourceLogs, 5)
- orig[0] = NewOrigResourceLogs()
- orig[1] = GenTestOrigResourceLogs()
- orig[2] = NewOrigResourceLogs()
- orig[3] = GenTestOrigResourceLogs()
- orig[4] = NewOrigResourceLogs()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetrics.go
deleted file mode 100644
index a412cfee8..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetrics.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolResourceMetrics = sync.Pool{
- New: func() any {
- return &otlpmetrics.ResourceMetrics{}
- },
- }
-)
-
-func NewOrigResourceMetrics() *otlpmetrics.ResourceMetrics {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.ResourceMetrics{}
- }
- return protoPoolResourceMetrics.Get().(*otlpmetrics.ResourceMetrics)
-}
-
-func DeleteOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigResource(&orig.Resource, false)
- for i := range orig.ScopeMetrics {
- DeleteOrigScopeMetrics(orig.ScopeMetrics[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolResourceMetrics.Put(orig)
- }
-}
-
-func CopyOrigResourceMetrics(dest, src *otlpmetrics.ResourceMetrics) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigResource(&dest.Resource, &src.Resource)
- dest.ScopeMetrics = CopyOrigScopeMetricsSlice(dest.ScopeMetrics, src.ScopeMetrics)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigResourceMetrics() *otlpmetrics.ResourceMetrics {
- orig := NewOrigResourceMetrics()
- orig.Resource = *GenTestOrigResource()
- orig.ScopeMetrics = GenerateOrigTestScopeMetricsSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("resource")
- MarshalJSONOrigResource(&orig.Resource, dest)
- if len(orig.ScopeMetrics) > 0 {
- dest.WriteObjectField("scopeMetrics")
- dest.WriteArrayStart()
- MarshalJSONOrigScopeMetrics(orig.ScopeMetrics[0], dest)
- for i := 1; i < len(orig.ScopeMetrics); i++ {
- dest.WriteMore()
- MarshalJSONOrigScopeMetrics(orig.ScopeMetrics[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigResourceMetrics unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resource":
- UnmarshalJSONOrigResource(&orig.Resource, iter)
- case "scopeMetrics", "scope_metrics":
- for iter.ReadArray() {
- orig.ScopeMetrics = append(orig.ScopeMetrics, NewOrigScopeMetrics())
- UnmarshalJSONOrigScopeMetrics(orig.ScopeMetrics[len(orig.ScopeMetrics)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigResource(&orig.Resource)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.ScopeMetrics {
- l = SizeProtoOrigScopeMetrics(orig.ScopeMetrics[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigResource(&orig.Resource, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.ScopeMetrics) - 1; i >= 0; i-- {
- l = MarshalProtoOrigScopeMetrics(orig.ScopeMetrics[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigResourceMetrics(orig *otlpmetrics.ResourceMetrics, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ScopeMetrics = append(orig.ScopeMetrics, NewOrigScopeMetrics())
- err = UnmarshalProtoOrigScopeMetrics(orig.ScopeMetrics[len(orig.ScopeMetrics)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetricsslice.go
deleted file mode 100644
index 7bd0db8fc..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcemetricsslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigResourceMetricsSlice(dest, src []*otlpmetrics.ResourceMetrics) []*otlpmetrics.ResourceMetrics {
- var newDest []*otlpmetrics.ResourceMetrics
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.ResourceMetrics, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceMetrics()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigResourceMetrics(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceMetrics()
- }
- }
- for i := range src {
- CopyOrigResourceMetrics(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestResourceMetricsSlice() []*otlpmetrics.ResourceMetrics {
- orig := make([]*otlpmetrics.ResourceMetrics, 5)
- orig[0] = NewOrigResourceMetrics()
- orig[1] = GenTestOrigResourceMetrics()
- orig[2] = NewOrigResourceMetrics()
- orig[3] = GenTestOrigResourceMetrics()
- orig[4] = NewOrigResourceMetrics()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofiles.go
deleted file mode 100644
index 5481ab286..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofiles.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolResourceProfiles = sync.Pool{
- New: func() any {
- return &otlpprofiles.ResourceProfiles{}
- },
- }
-)
-
-func NewOrigResourceProfiles() *otlpprofiles.ResourceProfiles {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.ResourceProfiles{}
- }
- return protoPoolResourceProfiles.Get().(*otlpprofiles.ResourceProfiles)
-}
-
-func DeleteOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigResource(&orig.Resource, false)
- for i := range orig.ScopeProfiles {
- DeleteOrigScopeProfiles(orig.ScopeProfiles[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolResourceProfiles.Put(orig)
- }
-}
-
-func CopyOrigResourceProfiles(dest, src *otlpprofiles.ResourceProfiles) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigResource(&dest.Resource, &src.Resource)
- dest.ScopeProfiles = CopyOrigScopeProfilesSlice(dest.ScopeProfiles, src.ScopeProfiles)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigResourceProfiles() *otlpprofiles.ResourceProfiles {
- orig := NewOrigResourceProfiles()
- orig.Resource = *GenTestOrigResource()
- orig.ScopeProfiles = GenerateOrigTestScopeProfilesSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("resource")
- MarshalJSONOrigResource(&orig.Resource, dest)
- if len(orig.ScopeProfiles) > 0 {
- dest.WriteObjectField("scopeProfiles")
- dest.WriteArrayStart()
- MarshalJSONOrigScopeProfiles(orig.ScopeProfiles[0], dest)
- for i := 1; i < len(orig.ScopeProfiles); i++ {
- dest.WriteMore()
- MarshalJSONOrigScopeProfiles(orig.ScopeProfiles[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigResourceProfiles unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resource":
- UnmarshalJSONOrigResource(&orig.Resource, iter)
- case "scopeProfiles", "scope_profiles":
- for iter.ReadArray() {
- orig.ScopeProfiles = append(orig.ScopeProfiles, NewOrigScopeProfiles())
- UnmarshalJSONOrigScopeProfiles(orig.ScopeProfiles[len(orig.ScopeProfiles)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigResource(&orig.Resource)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.ScopeProfiles {
- l = SizeProtoOrigScopeProfiles(orig.ScopeProfiles[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigResource(&orig.Resource, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.ScopeProfiles) - 1; i >= 0; i-- {
- l = MarshalProtoOrigScopeProfiles(orig.ScopeProfiles[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigResourceProfiles(orig *otlpprofiles.ResourceProfiles, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ScopeProfiles = append(orig.ScopeProfiles, NewOrigScopeProfiles())
- err = UnmarshalProtoOrigScopeProfiles(orig.ScopeProfiles[len(orig.ScopeProfiles)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofilesslice.go
deleted file mode 100644
index 978753cdc..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourceprofilesslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigResourceProfilesSlice(dest, src []*otlpprofiles.ResourceProfiles) []*otlpprofiles.ResourceProfiles {
- var newDest []*otlpprofiles.ResourceProfiles
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.ResourceProfiles, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceProfiles()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigResourceProfiles(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceProfiles()
- }
- }
- for i := range src {
- CopyOrigResourceProfiles(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestResourceProfilesSlice() []*otlpprofiles.ResourceProfiles {
- orig := make([]*otlpprofiles.ResourceProfiles, 5)
- orig[0] = NewOrigResourceProfiles()
- orig[1] = GenTestOrigResourceProfiles()
- orig[2] = NewOrigResourceProfiles()
- orig[3] = GenTestOrigResourceProfiles()
- orig[4] = NewOrigResourceProfiles()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespans.go
deleted file mode 100644
index 3599e21cd..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespans.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolResourceSpans = sync.Pool{
- New: func() any {
- return &otlptrace.ResourceSpans{}
- },
- }
-)
-
-func NewOrigResourceSpans() *otlptrace.ResourceSpans {
- if !UseProtoPooling.IsEnabled() {
- return &otlptrace.ResourceSpans{}
- }
- return protoPoolResourceSpans.Get().(*otlptrace.ResourceSpans)
-}
-
-func DeleteOrigResourceSpans(orig *otlptrace.ResourceSpans, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigResource(&orig.Resource, false)
- for i := range orig.ScopeSpans {
- DeleteOrigScopeSpans(orig.ScopeSpans[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolResourceSpans.Put(orig)
- }
-}
-
-func CopyOrigResourceSpans(dest, src *otlptrace.ResourceSpans) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigResource(&dest.Resource, &src.Resource)
- dest.ScopeSpans = CopyOrigScopeSpansSlice(dest.ScopeSpans, src.ScopeSpans)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigResourceSpans() *otlptrace.ResourceSpans {
- orig := NewOrigResourceSpans()
- orig.Resource = *GenTestOrigResource()
- orig.ScopeSpans = GenerateOrigTestScopeSpansSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigResourceSpans(orig *otlptrace.ResourceSpans, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("resource")
- MarshalJSONOrigResource(&orig.Resource, dest)
- if len(orig.ScopeSpans) > 0 {
- dest.WriteObjectField("scopeSpans")
- dest.WriteArrayStart()
- MarshalJSONOrigScopeSpans(orig.ScopeSpans[0], dest)
- for i := 1; i < len(orig.ScopeSpans); i++ {
- dest.WriteMore()
- MarshalJSONOrigScopeSpans(orig.ScopeSpans[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigResourceSpans unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigResourceSpans(orig *otlptrace.ResourceSpans, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "resource":
- UnmarshalJSONOrigResource(&orig.Resource, iter)
- case "scopeSpans", "scope_spans":
- for iter.ReadArray() {
- orig.ScopeSpans = append(orig.ScopeSpans, NewOrigScopeSpans())
- UnmarshalJSONOrigScopeSpans(orig.ScopeSpans[len(orig.ScopeSpans)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigResourceSpans(orig *otlptrace.ResourceSpans) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigResource(&orig.Resource)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.ScopeSpans {
- l = SizeProtoOrigScopeSpans(orig.ScopeSpans[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigResourceSpans(orig *otlptrace.ResourceSpans, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigResource(&orig.Resource, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.ScopeSpans) - 1; i >= 0; i-- {
- l = MarshalProtoOrigScopeSpans(orig.ScopeSpans[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigResourceSpans(orig *otlptrace.ResourceSpans, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigResource(&orig.Resource, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.ScopeSpans = append(orig.ScopeSpans, NewOrigScopeSpans())
- err = UnmarshalProtoOrigScopeSpans(orig.ScopeSpans[len(orig.ScopeSpans)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespansslice.go
deleted file mode 100644
index 3128ef758..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resourcespansslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-func CopyOrigResourceSpansSlice(dest, src []*otlptrace.ResourceSpans) []*otlptrace.ResourceSpans {
- var newDest []*otlptrace.ResourceSpans
- if cap(dest) < len(src) {
- newDest = make([]*otlptrace.ResourceSpans, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceSpans()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigResourceSpans(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigResourceSpans()
- }
- }
- for i := range src {
- CopyOrigResourceSpans(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestResourceSpansSlice() []*otlptrace.ResourceSpans {
- orig := make([]*otlptrace.ResourceSpans, 5)
- orig[0] = NewOrigResourceSpans()
- orig[1] = GenTestOrigResourceSpans()
- orig[2] = NewOrigResourceSpans()
- orig[3] = GenTestOrigResourceSpans()
- orig[4] = NewOrigResourceSpans()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sample.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sample.go
deleted file mode 100644
index 59fd9be14..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sample.go
+++ /dev/null
@@ -1,402 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolSample = sync.Pool{
- New: func() any {
- return &otlpprofiles.Sample{}
- },
- }
- ProtoPoolSample_LinkIndex = sync.Pool{
- New: func() any {
- return &otlpprofiles.Sample_LinkIndex{}
- },
- }
-)
-
-func NewOrigSample() *otlpprofiles.Sample {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.Sample{}
- }
- return protoPoolSample.Get().(*otlpprofiles.Sample)
-}
-
-func DeleteOrigSample(orig *otlpprofiles.Sample, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- switch ov := orig.LinkIndex_.(type) {
- case *otlpprofiles.Sample_LinkIndex:
- if UseProtoPooling.IsEnabled() {
- ov.LinkIndex = int32(0)
- ProtoPoolSample_LinkIndex.Put(ov)
- }
-
- }
-
- orig.Reset()
- if nullable {
- protoPoolSample.Put(orig)
- }
-}
-
-func CopyOrigSample(dest, src *otlpprofiles.Sample) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.LocationsStartIndex = src.LocationsStartIndex
- dest.LocationsLength = src.LocationsLength
- dest.Value = CopyOrigInt64Slice(dest.Value, src.Value)
- dest.AttributeIndices = CopyOrigInt32Slice(dest.AttributeIndices, src.AttributeIndices)
- if srcLinkIndex, ok := src.LinkIndex_.(*otlpprofiles.Sample_LinkIndex); ok {
- destLinkIndex, ok := dest.LinkIndex_.(*otlpprofiles.Sample_LinkIndex)
- if !ok {
- destLinkIndex = &otlpprofiles.Sample_LinkIndex{}
- dest.LinkIndex_ = destLinkIndex
- }
- destLinkIndex.LinkIndex = srcLinkIndex.LinkIndex
- } else {
- dest.LinkIndex_ = nil
- }
- dest.TimestampsUnixNano = CopyOrigUint64Slice(dest.TimestampsUnixNano, src.TimestampsUnixNano)
-}
-
-func GenTestOrigSample() *otlpprofiles.Sample {
- orig := NewOrigSample()
- orig.LocationsStartIndex = int32(13)
- orig.LocationsLength = int32(13)
- orig.Value = GenerateOrigTestInt64Slice()
- orig.AttributeIndices = GenerateOrigTestInt32Slice()
- orig.LinkIndex_ = &otlpprofiles.Sample_LinkIndex{LinkIndex: int32(13)}
- orig.TimestampsUnixNano = GenerateOrigTestUint64Slice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSample(orig *otlpprofiles.Sample, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.LocationsStartIndex != int32(0) {
- dest.WriteObjectField("locationsStartIndex")
- dest.WriteInt32(orig.LocationsStartIndex)
- }
- if orig.LocationsLength != int32(0) {
- dest.WriteObjectField("locationsLength")
- dest.WriteInt32(orig.LocationsLength)
- }
- if len(orig.Value) > 0 {
- dest.WriteObjectField("value")
- dest.WriteArrayStart()
- dest.WriteInt64(orig.Value[0])
- for i := 1; i < len(orig.Value); i++ {
- dest.WriteMore()
- dest.WriteInt64(orig.Value[i])
- }
- dest.WriteArrayEnd()
- }
- if len(orig.AttributeIndices) > 0 {
- dest.WriteObjectField("attributeIndices")
- dest.WriteArrayStart()
- dest.WriteInt32(orig.AttributeIndices[0])
- for i := 1; i < len(orig.AttributeIndices); i++ {
- dest.WriteMore()
- dest.WriteInt32(orig.AttributeIndices[i])
- }
- dest.WriteArrayEnd()
- }
- if orig, ok := orig.LinkIndex_.(*otlpprofiles.Sample_LinkIndex); ok {
- dest.WriteObjectField("linkIndex")
- dest.WriteInt32(orig.LinkIndex)
- }
- if len(orig.TimestampsUnixNano) > 0 {
- dest.WriteObjectField("timestampsUnixNano")
- dest.WriteArrayStart()
- dest.WriteUint64(orig.TimestampsUnixNano[0])
- for i := 1; i < len(orig.TimestampsUnixNano); i++ {
- dest.WriteMore()
- dest.WriteUint64(orig.TimestampsUnixNano[i])
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigSample unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSample(orig *otlpprofiles.Sample, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "locationsStartIndex", "locations_start_index":
- orig.LocationsStartIndex = iter.ReadInt32()
- case "locationsLength", "locations_length":
- orig.LocationsLength = iter.ReadInt32()
- case "value":
- for iter.ReadArray() {
- orig.Value = append(orig.Value, iter.ReadInt64())
- }
-
- case "attributeIndices", "attribute_indices":
- for iter.ReadArray() {
- orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
- }
-
- case "linkIndex", "link_index":
- {
- var ov *otlpprofiles.Sample_LinkIndex
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpprofiles.Sample_LinkIndex{}
- } else {
- ov = ProtoPoolSample_LinkIndex.Get().(*otlpprofiles.Sample_LinkIndex)
- }
- ov.LinkIndex = iter.ReadInt32()
- orig.LinkIndex_ = ov
- }
-
- case "timestampsUnixNano", "timestamps_unix_nano":
- for iter.ReadArray() {
- orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, iter.ReadUint64())
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigSample(orig *otlpprofiles.Sample) int {
- var n int
- var l int
- _ = l
- if orig.LocationsStartIndex != 0 {
- n += 1 + proto.Sov(uint64(orig.LocationsStartIndex))
- }
- if orig.LocationsLength != 0 {
- n += 1 + proto.Sov(uint64(orig.LocationsLength))
- }
- if len(orig.Value) > 0 {
- l = 0
- for _, e := range orig.Value {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if len(orig.AttributeIndices) > 0 {
- l = 0
- for _, e := range orig.AttributeIndices {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig, ok := orig.LinkIndex_.(*otlpprofiles.Sample_LinkIndex); ok {
- _ = orig
- n += 1 + proto.Sov(uint64(orig.LinkIndex))
- }
- if len(orig.TimestampsUnixNano) > 0 {
- l = 0
- for _, e := range orig.TimestampsUnixNano {
- l += proto.Sov(uint64(e))
- }
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.LocationsStartIndex != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.LocationsStartIndex))
- pos--
- buf[pos] = 0x8
- }
- if orig.LocationsLength != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.LocationsLength))
- pos--
- buf[pos] = 0x10
- }
- l = len(orig.Value)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.Value[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x1a
- }
- l = len(orig.AttributeIndices)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x22
- }
- if orig, ok := orig.LinkIndex_.(*otlpprofiles.Sample_LinkIndex); ok {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.LinkIndex))
- pos--
- buf[pos] = 0x28
- }
- l = len(orig.TimestampsUnixNano)
- if l > 0 {
- endPos := pos
- for i := l - 1; i >= 0; i-- {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.TimestampsUnixNano[i]))
- }
- pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
- pos--
- buf[pos] = 0x32
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigSample(orig *otlpprofiles.Sample, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.LocationsStartIndex = int32(num)
-
- case 2:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.LocationsLength = int32(num)
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.Value = append(orig.Value, int64(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field Value", pos-startPos)
- }
- case 4:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
- }
-
- case 5:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
- var ov *otlpprofiles.Sample_LinkIndex
- if !UseProtoPooling.IsEnabled() {
- ov = &otlpprofiles.Sample_LinkIndex{}
- } else {
- ov = ProtoPoolSample_LinkIndex.Get().(*otlpprofiles.Sample_LinkIndex)
- }
- ov.LinkIndex = int32(num)
- orig.LinkIndex_ = ov
- case 6:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- var num uint64
- for startPos < pos {
- num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
- if err != nil {
- return err
- }
- orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, uint64(num))
- }
- if startPos != pos {
- return fmt.Errorf("proto: invalid field len = %d for field TimestampsUnixNano", pos-startPos)
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sampleslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sampleslice.go
deleted file mode 100644
index a2dfca321..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_sampleslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigSampleSlice(dest, src []*otlpprofiles.Sample) []*otlpprofiles.Sample {
- var newDest []*otlpprofiles.Sample
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.Sample, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSample()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigSample(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSample()
- }
- }
- for i := range src {
- CopyOrigSample(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestSampleSlice() []*otlpprofiles.Sample {
- orig := make([]*otlpprofiles.Sample, 5)
- orig[0] = NewOrigSample()
- orig[1] = GenTestOrigSample()
- orig[2] = NewOrigSample()
- orig[3] = GenTestOrigSample()
- orig[4] = NewOrigSample()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogs.go
deleted file mode 100644
index c98a86112..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogs.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolScopeLogs = sync.Pool{
- New: func() any {
- return &otlplogs.ScopeLogs{}
- },
- }
-)
-
-func NewOrigScopeLogs() *otlplogs.ScopeLogs {
- if !UseProtoPooling.IsEnabled() {
- return &otlplogs.ScopeLogs{}
- }
- return protoPoolScopeLogs.Get().(*otlplogs.ScopeLogs)
-}
-
-func DeleteOrigScopeLogs(orig *otlplogs.ScopeLogs, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigInstrumentationScope(&orig.Scope, false)
- for i := range orig.LogRecords {
- DeleteOrigLogRecord(orig.LogRecords[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolScopeLogs.Put(orig)
- }
-}
-
-func CopyOrigScopeLogs(dest, src *otlplogs.ScopeLogs) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
- dest.LogRecords = CopyOrigLogRecordSlice(dest.LogRecords, src.LogRecords)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigScopeLogs() *otlplogs.ScopeLogs {
- orig := NewOrigScopeLogs()
- orig.Scope = *GenTestOrigInstrumentationScope()
- orig.LogRecords = GenerateOrigTestLogRecordSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigScopeLogs(orig *otlplogs.ScopeLogs, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("scope")
- MarshalJSONOrigInstrumentationScope(&orig.Scope, dest)
- if len(orig.LogRecords) > 0 {
- dest.WriteObjectField("logRecords")
- dest.WriteArrayStart()
- MarshalJSONOrigLogRecord(orig.LogRecords[0], dest)
- for i := 1; i < len(orig.LogRecords); i++ {
- dest.WriteMore()
- MarshalJSONOrigLogRecord(orig.LogRecords[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigScopeLogs unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigScopeLogs(orig *otlplogs.ScopeLogs, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "scope":
- UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter)
- case "logRecords", "log_records":
- for iter.ReadArray() {
- orig.LogRecords = append(orig.LogRecords, NewOrigLogRecord())
- UnmarshalJSONOrigLogRecord(orig.LogRecords[len(orig.LogRecords)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigScopeLogs(orig *otlplogs.ScopeLogs) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigInstrumentationScope(&orig.Scope)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.LogRecords {
- l = SizeProtoOrigLogRecord(orig.LogRecords[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigScopeLogs(orig *otlplogs.ScopeLogs, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.LogRecords) - 1; i >= 0; i-- {
- l = MarshalProtoOrigLogRecord(orig.LogRecords[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigScopeLogs(orig *otlplogs.ScopeLogs, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.LogRecords = append(orig.LogRecords, NewOrigLogRecord())
- err = UnmarshalProtoOrigLogRecord(orig.LogRecords[len(orig.LogRecords)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogsslice.go
deleted file mode 100644
index ed0b6cc78..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopelogsslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
-func CopyOrigScopeLogsSlice(dest, src []*otlplogs.ScopeLogs) []*otlplogs.ScopeLogs {
- var newDest []*otlplogs.ScopeLogs
- if cap(dest) < len(src) {
- newDest = make([]*otlplogs.ScopeLogs, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeLogs()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigScopeLogs(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeLogs()
- }
- }
- for i := range src {
- CopyOrigScopeLogs(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestScopeLogsSlice() []*otlplogs.ScopeLogs {
- orig := make([]*otlplogs.ScopeLogs, 5)
- orig[0] = NewOrigScopeLogs()
- orig[1] = GenTestOrigScopeLogs()
- orig[2] = NewOrigScopeLogs()
- orig[3] = GenTestOrigScopeLogs()
- orig[4] = NewOrigScopeLogs()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetrics.go
deleted file mode 100644
index 59e05b32f..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetrics.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolScopeMetrics = sync.Pool{
- New: func() any {
- return &otlpmetrics.ScopeMetrics{}
- },
- }
-)
-
-func NewOrigScopeMetrics() *otlpmetrics.ScopeMetrics {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.ScopeMetrics{}
- }
- return protoPoolScopeMetrics.Get().(*otlpmetrics.ScopeMetrics)
-}
-
-func DeleteOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigInstrumentationScope(&orig.Scope, false)
- for i := range orig.Metrics {
- DeleteOrigMetric(orig.Metrics[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolScopeMetrics.Put(orig)
- }
-}
-
-func CopyOrigScopeMetrics(dest, src *otlpmetrics.ScopeMetrics) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
- dest.Metrics = CopyOrigMetricSlice(dest.Metrics, src.Metrics)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigScopeMetrics() *otlpmetrics.ScopeMetrics {
- orig := NewOrigScopeMetrics()
- orig.Scope = *GenTestOrigInstrumentationScope()
- orig.Metrics = GenerateOrigTestMetricSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("scope")
- MarshalJSONOrigInstrumentationScope(&orig.Scope, dest)
- if len(orig.Metrics) > 0 {
- dest.WriteObjectField("metrics")
- dest.WriteArrayStart()
- MarshalJSONOrigMetric(orig.Metrics[0], dest)
- for i := 1; i < len(orig.Metrics); i++ {
- dest.WriteMore()
- MarshalJSONOrigMetric(orig.Metrics[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigScopeMetrics unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "scope":
- UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter)
- case "metrics":
- for iter.ReadArray() {
- orig.Metrics = append(orig.Metrics, NewOrigMetric())
- UnmarshalJSONOrigMetric(orig.Metrics[len(orig.Metrics)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigInstrumentationScope(&orig.Scope)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.Metrics {
- l = SizeProtoOrigMetric(orig.Metrics[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.Metrics) - 1; i >= 0; i-- {
- l = MarshalProtoOrigMetric(orig.Metrics[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigScopeMetrics(orig *otlpmetrics.ScopeMetrics, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Metrics = append(orig.Metrics, NewOrigMetric())
- err = UnmarshalProtoOrigMetric(orig.Metrics[len(orig.Metrics)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetricsslice.go
deleted file mode 100644
index 31cfbf52e..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopemetricsslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigScopeMetricsSlice(dest, src []*otlpmetrics.ScopeMetrics) []*otlpmetrics.ScopeMetrics {
- var newDest []*otlpmetrics.ScopeMetrics
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.ScopeMetrics, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeMetrics()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigScopeMetrics(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeMetrics()
- }
- }
- for i := range src {
- CopyOrigScopeMetrics(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestScopeMetricsSlice() []*otlpmetrics.ScopeMetrics {
- orig := make([]*otlpmetrics.ScopeMetrics, 5)
- orig[0] = NewOrigScopeMetrics()
- orig[1] = GenTestOrigScopeMetrics()
- orig[2] = NewOrigScopeMetrics()
- orig[3] = GenTestOrigScopeMetrics()
- orig[4] = NewOrigScopeMetrics()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofiles.go
deleted file mode 100644
index b64845a36..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofiles.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolScopeProfiles = sync.Pool{
- New: func() any {
- return &otlpprofiles.ScopeProfiles{}
- },
- }
-)
-
-func NewOrigScopeProfiles() *otlpprofiles.ScopeProfiles {
- if !UseProtoPooling.IsEnabled() {
- return &otlpprofiles.ScopeProfiles{}
- }
- return protoPoolScopeProfiles.Get().(*otlpprofiles.ScopeProfiles)
-}
-
-func DeleteOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigInstrumentationScope(&orig.Scope, false)
- for i := range orig.Profiles {
- DeleteOrigProfile(orig.Profiles[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolScopeProfiles.Put(orig)
- }
-}
-
-func CopyOrigScopeProfiles(dest, src *otlpprofiles.ScopeProfiles) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
- dest.Profiles = CopyOrigProfileSlice(dest.Profiles, src.Profiles)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigScopeProfiles() *otlpprofiles.ScopeProfiles {
- orig := NewOrigScopeProfiles()
- orig.Scope = *GenTestOrigInstrumentationScope()
- orig.Profiles = GenerateOrigTestProfileSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("scope")
- MarshalJSONOrigInstrumentationScope(&orig.Scope, dest)
- if len(orig.Profiles) > 0 {
- dest.WriteObjectField("profiles")
- dest.WriteArrayStart()
- MarshalJSONOrigProfile(orig.Profiles[0], dest)
- for i := 1; i < len(orig.Profiles); i++ {
- dest.WriteMore()
- MarshalJSONOrigProfile(orig.Profiles[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigScopeProfiles unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "scope":
- UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter)
- case "profiles":
- for iter.ReadArray() {
- orig.Profiles = append(orig.Profiles, NewOrigProfile())
- UnmarshalJSONOrigProfile(orig.Profiles[len(orig.Profiles)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigInstrumentationScope(&orig.Scope)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.Profiles {
- l = SizeProtoOrigProfile(orig.Profiles[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.Profiles) - 1; i >= 0; i-- {
- l = MarshalProtoOrigProfile(orig.Profiles[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigScopeProfiles(orig *otlpprofiles.ScopeProfiles, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Profiles = append(orig.Profiles, NewOrigProfile())
- err = UnmarshalProtoOrigProfile(orig.Profiles[len(orig.Profiles)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofilesslice.go
deleted file mode 100644
index 01e089413..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopeprofilesslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigScopeProfilesSlice(dest, src []*otlpprofiles.ScopeProfiles) []*otlpprofiles.ScopeProfiles {
- var newDest []*otlpprofiles.ScopeProfiles
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.ScopeProfiles, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeProfiles()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigScopeProfiles(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeProfiles()
- }
- }
- for i := range src {
- CopyOrigScopeProfiles(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestScopeProfilesSlice() []*otlpprofiles.ScopeProfiles {
- orig := make([]*otlpprofiles.ScopeProfiles, 5)
- orig[0] = NewOrigScopeProfiles()
- orig[1] = GenTestOrigScopeProfiles()
- orig[2] = NewOrigScopeProfiles()
- orig[3] = GenTestOrigScopeProfiles()
- orig[4] = NewOrigScopeProfiles()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespans.go
deleted file mode 100644
index d8778228f..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespans.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolScopeSpans = sync.Pool{
- New: func() any {
- return &otlptrace.ScopeSpans{}
- },
- }
-)
-
-func NewOrigScopeSpans() *otlptrace.ScopeSpans {
- if !UseProtoPooling.IsEnabled() {
- return &otlptrace.ScopeSpans{}
- }
- return protoPoolScopeSpans.Get().(*otlptrace.ScopeSpans)
-}
-
-func DeleteOrigScopeSpans(orig *otlptrace.ScopeSpans, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- DeleteOrigInstrumentationScope(&orig.Scope, false)
- for i := range orig.Spans {
- DeleteOrigSpan(orig.Spans[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolScopeSpans.Put(orig)
- }
-}
-
-func CopyOrigScopeSpans(dest, src *otlptrace.ScopeSpans) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
- dest.Spans = CopyOrigSpanSlice(dest.Spans, src.Spans)
- dest.SchemaUrl = src.SchemaUrl
-}
-
-func GenTestOrigScopeSpans() *otlptrace.ScopeSpans {
- orig := NewOrigScopeSpans()
- orig.Scope = *GenTestOrigInstrumentationScope()
- orig.Spans = GenerateOrigTestSpanSlice()
- orig.SchemaUrl = "test_schemaurl"
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigScopeSpans(orig *otlptrace.ScopeSpans, dest *json.Stream) {
- dest.WriteObjectStart()
- dest.WriteObjectField("scope")
- MarshalJSONOrigInstrumentationScope(&orig.Scope, dest)
- if len(orig.Spans) > 0 {
- dest.WriteObjectField("spans")
- dest.WriteArrayStart()
- MarshalJSONOrigSpan(orig.Spans[0], dest)
- for i := 1; i < len(orig.Spans); i++ {
- dest.WriteMore()
- MarshalJSONOrigSpan(orig.Spans[i], dest)
- }
- dest.WriteArrayEnd()
- }
- if orig.SchemaUrl != "" {
- dest.WriteObjectField("schemaUrl")
- dest.WriteString(orig.SchemaUrl)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigScopeSpans unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigScopeSpans(orig *otlptrace.ScopeSpans, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "scope":
- UnmarshalJSONOrigInstrumentationScope(&orig.Scope, iter)
- case "spans":
- for iter.ReadArray() {
- orig.Spans = append(orig.Spans, NewOrigSpan())
- UnmarshalJSONOrigSpan(orig.Spans[len(orig.Spans)-1], iter)
- }
-
- case "schemaUrl", "schema_url":
- orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigScopeSpans(orig *otlptrace.ScopeSpans) int {
- var n int
- var l int
- _ = l
- l = SizeProtoOrigInstrumentationScope(&orig.Scope)
- n += 1 + proto.Sov(uint64(l)) + l
- for i := range orig.Spans {
- l = SizeProtoOrigSpan(orig.Spans[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigScopeSpans(orig *otlptrace.ScopeSpans, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
-
- l = MarshalProtoOrigInstrumentationScope(&orig.Scope, buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
-
- for i := len(orig.Spans) - 1; i >= 0; i-- {
- l = MarshalProtoOrigSpan(orig.Spans[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- l = len(orig.SchemaUrl)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.SchemaUrl)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x1a
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigScopeSpans(orig *otlptrace.ScopeSpans, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
-
- err = UnmarshalProtoOrigInstrumentationScope(&orig.Scope, buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Spans = append(orig.Spans, NewOrigSpan())
- err = UnmarshalProtoOrigSpan(orig.Spans[len(orig.Spans)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
-
- case 3:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.SchemaUrl = string(buf[startPos:pos])
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespansslice.go
deleted file mode 100644
index 23583b1cd..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_scopespansslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-func CopyOrigScopeSpansSlice(dest, src []*otlptrace.ScopeSpans) []*otlptrace.ScopeSpans {
- var newDest []*otlptrace.ScopeSpans
- if cap(dest) < len(src) {
- newDest = make([]*otlptrace.ScopeSpans, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeSpans()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigScopeSpans(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigScopeSpans()
- }
- }
- for i := range src {
- CopyOrigScopeSpans(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestScopeSpansSlice() []*otlptrace.ScopeSpans {
- orig := make([]*otlptrace.ScopeSpans, 5)
- orig[0] = NewOrigScopeSpans()
- orig[1] = GenTestOrigScopeSpans()
- orig[2] = NewOrigScopeSpans()
- orig[3] = GenTestOrigScopeSpans()
- orig[4] = NewOrigScopeSpans()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_eventslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_eventslice.go
deleted file mode 100644
index b9bd2e445..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_eventslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-func CopyOrigSpan_EventSlice(dest, src []*otlptrace.Span_Event) []*otlptrace.Span_Event {
- var newDest []*otlptrace.Span_Event
- if cap(dest) < len(src) {
- newDest = make([]*otlptrace.Span_Event, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSpan_Event()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigSpan_Event(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSpan_Event()
- }
- }
- for i := range src {
- CopyOrigSpan_Event(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestSpan_EventSlice() []*otlptrace.Span_Event {
- orig := make([]*otlptrace.Span_Event, 5)
- orig[0] = NewOrigSpan_Event()
- orig[1] = GenTestOrigSpan_Event()
- orig[2] = NewOrigSpan_Event()
- orig[3] = GenTestOrigSpan_Event()
- orig[4] = NewOrigSpan_Event()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_linkslice.go
deleted file mode 100644
index 55aed8214..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_span_linkslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-func CopyOrigSpan_LinkSlice(dest, src []*otlptrace.Span_Link) []*otlptrace.Span_Link {
- var newDest []*otlptrace.Span_Link
- if cap(dest) < len(src) {
- newDest = make([]*otlptrace.Span_Link, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSpan_Link()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigSpan_Link(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSpan_Link()
- }
- }
- for i := range src {
- CopyOrigSpan_Link(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestSpan_LinkSlice() []*otlptrace.Span_Link {
- orig := make([]*otlptrace.Span_Link, 5)
- orig[0] = NewOrigSpan_Link()
- orig[1] = GenTestOrigSpan_Link()
- orig[2] = NewOrigSpan_Link()
- orig[3] = GenTestOrigSpan_Link()
- orig[4] = NewOrigSpan_Link()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_spanslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_spanslice.go
deleted file mode 100644
index 7dcbb05f4..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_spanslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-func CopyOrigSpanSlice(dest, src []*otlptrace.Span) []*otlptrace.Span {
- var newDest []*otlptrace.Span
- if cap(dest) < len(src) {
- newDest = make([]*otlptrace.Span, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSpan()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigSpan(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSpan()
- }
- }
- for i := range src {
- CopyOrigSpan(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestSpanSlice() []*otlptrace.Span {
- orig := make([]*otlptrace.Span, 5)
- orig[0] = NewOrigSpan()
- orig[1] = GenTestOrigSpan()
- orig[2] = NewOrigSpan()
- orig[3] = GenTestOrigSpan()
- orig[4] = NewOrigSpan()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_status.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_status.go
deleted file mode 100644
index 26d64e863..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_status.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolStatus = sync.Pool{
- New: func() any {
- return &otlptrace.Status{}
- },
- }
-)
-
-func NewOrigStatus() *otlptrace.Status {
- if !UseProtoPooling.IsEnabled() {
- return &otlptrace.Status{}
- }
- return protoPoolStatus.Get().(*otlptrace.Status)
-}
-
-func DeleteOrigStatus(orig *otlptrace.Status, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolStatus.Put(orig)
- }
-}
-
-func CopyOrigStatus(dest, src *otlptrace.Status) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Message = src.Message
- dest.Code = src.Code
-}
-
-func GenTestOrigStatus() *otlptrace.Status {
- orig := NewOrigStatus()
- orig.Message = "test_message"
- orig.Code = otlptrace.Status_StatusCode(1)
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigStatus(orig *otlptrace.Status, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.Message != "" {
- dest.WriteObjectField("message")
- dest.WriteString(orig.Message)
- }
-
- if int32(orig.Code) != 0 {
- dest.WriteObjectField("code")
- dest.WriteInt32(int32(orig.Code))
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigStatus unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigStatus(orig *otlptrace.Status, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "message":
- orig.Message = iter.ReadString()
- case "code":
- orig.Code = otlptrace.Status_StatusCode(iter.ReadEnumValue(otlptrace.Status_StatusCode_value))
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigStatus(orig *otlptrace.Status) int {
- var n int
- var l int
- _ = l
- l = len(orig.Message)
- if l > 0 {
- n += 1 + proto.Sov(uint64(l)) + l
- }
- if orig.Code != 0 {
- n += 1 + proto.Sov(uint64(orig.Code))
- }
- return n
-}
-
-func MarshalProtoOrigStatus(orig *otlptrace.Status, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- l = len(orig.Message)
- if l > 0 {
- pos -= l
- copy(buf[pos:], orig.Message)
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0x12
- }
- if orig.Code != 0 {
- pos = proto.EncodeVarint(buf, pos, uint64(orig.Code))
- pos--
- buf[pos] = 0x18
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigStatus(orig *otlptrace.Status, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 2:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.Message = string(buf[startPos:pos])
-
- case 3:
- if wireType != proto.WireTypeVarint {
- return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeVarint(buf, pos)
- if err != nil {
- return err
- }
-
- orig.Code = otlptrace.Status_StatusCode(num)
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go
index 3ed5c18cc..d922f65d8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go
@@ -6,32 +6,28 @@
package internal
-type StringSlice struct {
+type StringSliceWrapper struct {
orig *[]string
state *State
}
-func GetOrigStringSlice(ms StringSlice) *[]string {
+func GetStringSliceOrig(ms StringSliceWrapper) *[]string {
return ms.orig
}
-func GetStringSliceState(ms StringSlice) *State {
+func GetStringSliceState(ms StringSliceWrapper) *State {
return ms.state
}
-func NewStringSlice(orig *[]string, state *State) StringSlice {
- return StringSlice{orig: orig, state: state}
+func NewStringSliceWrapper(orig *[]string, state *State) StringSliceWrapper {
+ return StringSliceWrapper{orig: orig, state: state}
}
-func GenerateTestStringSlice() StringSlice {
- orig := GenerateOrigTestStringSlice()
- return NewStringSlice(&orig, NewState())
+func GenTestStringSliceWrapper() StringSliceWrapper {
+ orig := []string{"a", "b", "c"}
+ return NewStringSliceWrapper(&orig, NewState())
}
-func CopyOrigStringSlice(dst, src []string) []string {
- return append(dst[:0], src...)
-}
-
-func GenerateOrigTestStringSlice() []string {
+func GenTestStringSlice() []string {
return []string{"a", "b", "c"}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summary.go
deleted file mode 100644
index 215d60ab4..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summary.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "fmt"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolSummary = sync.Pool{
- New: func() any {
- return &otlpmetrics.Summary{}
- },
- }
-)
-
-func NewOrigSummary() *otlpmetrics.Summary {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.Summary{}
- }
- return protoPoolSummary.Get().(*otlpmetrics.Summary)
-}
-
-func DeleteOrigSummary(orig *otlpmetrics.Summary, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- for i := range orig.DataPoints {
- DeleteOrigSummaryDataPoint(orig.DataPoints[i], true)
- }
-
- orig.Reset()
- if nullable {
- protoPoolSummary.Put(orig)
- }
-}
-
-func CopyOrigSummary(dest, src *otlpmetrics.Summary) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.DataPoints = CopyOrigSummaryDataPointSlice(dest.DataPoints, src.DataPoints)
-}
-
-func GenTestOrigSummary() *otlpmetrics.Summary {
- orig := NewOrigSummary()
- orig.DataPoints = GenerateOrigTestSummaryDataPointSlice()
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSummary(orig *otlpmetrics.Summary, dest *json.Stream) {
- dest.WriteObjectStart()
- if len(orig.DataPoints) > 0 {
- dest.WriteObjectField("dataPoints")
- dest.WriteArrayStart()
- MarshalJSONOrigSummaryDataPoint(orig.DataPoints[0], dest)
- for i := 1; i < len(orig.DataPoints); i++ {
- dest.WriteMore()
- MarshalJSONOrigSummaryDataPoint(orig.DataPoints[i], dest)
- }
- dest.WriteArrayEnd()
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigSummary unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSummary(orig *otlpmetrics.Summary, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "dataPoints", "data_points":
- for iter.ReadArray() {
- orig.DataPoints = append(orig.DataPoints, NewOrigSummaryDataPoint())
- UnmarshalJSONOrigSummaryDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
- }
-
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigSummary(orig *otlpmetrics.Summary) int {
- var n int
- var l int
- _ = l
- for i := range orig.DataPoints {
- l = SizeProtoOrigSummaryDataPoint(orig.DataPoints[i])
- n += 1 + proto.Sov(uint64(l)) + l
- }
- return n
-}
-
-func MarshalProtoOrigSummary(orig *otlpmetrics.Summary, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- for i := len(orig.DataPoints) - 1; i >= 0; i-- {
- l = MarshalProtoOrigSummaryDataPoint(orig.DataPoints[i], buf[:pos])
- pos -= l
- pos = proto.EncodeVarint(buf, pos, uint64(l))
- pos--
- buf[pos] = 0xa
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigSummary(orig *otlpmetrics.Summary, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeLen {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var length int
- length, pos, err = proto.ConsumeLen(buf, pos)
- if err != nil {
- return err
- }
- startPos := pos - length
- orig.DataPoints = append(orig.DataPoints, NewOrigSummaryDataPoint())
- err = UnmarshalProtoOrigSummaryDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
- if err != nil {
- return err
- }
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantile.go
deleted file mode 100644
index 3195880d9..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantile.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- "encoding/binary"
- "fmt"
- "math"
- "sync"
-
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
- "go.opentelemetry.io/collector/pdata/internal/json"
- "go.opentelemetry.io/collector/pdata/internal/proto"
-)
-
-var (
- protoPoolSummaryDataPoint_ValueAtQuantile = sync.Pool{
- New: func() any {
- return &otlpmetrics.SummaryDataPoint_ValueAtQuantile{}
- },
- }
-)
-
-func NewOrigSummaryDataPoint_ValueAtQuantile() *otlpmetrics.SummaryDataPoint_ValueAtQuantile {
- if !UseProtoPooling.IsEnabled() {
- return &otlpmetrics.SummaryDataPoint_ValueAtQuantile{}
- }
- return protoPoolSummaryDataPoint_ValueAtQuantile.Get().(*otlpmetrics.SummaryDataPoint_ValueAtQuantile)
-}
-
-func DeleteOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, nullable bool) {
- if orig == nil {
- return
- }
-
- if !UseProtoPooling.IsEnabled() {
- orig.Reset()
- return
- }
-
- orig.Reset()
- if nullable {
- protoPoolSummaryDataPoint_ValueAtQuantile.Put(orig)
- }
-}
-
-func CopyOrigSummaryDataPoint_ValueAtQuantile(dest, src *otlpmetrics.SummaryDataPoint_ValueAtQuantile) {
- // If copying to same object, just return.
- if src == dest {
- return
- }
- dest.Quantile = src.Quantile
- dest.Value = src.Value
-}
-
-func GenTestOrigSummaryDataPoint_ValueAtQuantile() *otlpmetrics.SummaryDataPoint_ValueAtQuantile {
- orig := NewOrigSummaryDataPoint_ValueAtQuantile()
- orig.Quantile = float64(3.1415926)
- orig.Value = float64(3.1415926)
- return orig
-}
-
-// MarshalJSONOrig marshals all properties from the current struct to the destination stream.
-func MarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, dest *json.Stream) {
- dest.WriteObjectStart()
- if orig.Quantile != float64(0) {
- dest.WriteObjectField("quantile")
- dest.WriteFloat64(orig.Quantile)
- }
- if orig.Value != float64(0) {
- dest.WriteObjectField("value")
- dest.WriteFloat64(orig.Value)
- }
- dest.WriteObjectEnd()
-}
-
-// UnmarshalJSONOrigSummaryDataPointValueAtQuantile unmarshals all properties from the current struct from the source iterator.
-func UnmarshalJSONOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, iter *json.Iterator) {
- for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
- switch f {
- case "quantile":
- orig.Quantile = iter.ReadFloat64()
- case "value":
- orig.Value = iter.ReadFloat64()
- default:
- iter.Skip()
- }
- }
-}
-
-func SizeProtoOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile) int {
- var n int
- var l int
- _ = l
- if orig.Quantile != 0 {
- n += 9
- }
- if orig.Value != 0 {
- n += 9
- }
- return n
-}
-
-func MarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, buf []byte) int {
- pos := len(buf)
- var l int
- _ = l
- if orig.Quantile != 0 {
- pos -= 8
- binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Quantile))
- pos--
- buf[pos] = 0x9
- }
- if orig.Value != 0 {
- pos -= 8
- binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Value))
- pos--
- buf[pos] = 0x11
- }
- return len(buf) - pos
-}
-
-func UnmarshalProtoOrigSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, buf []byte) error {
- var err error
- var fieldNum int32
- var wireType proto.WireType
-
- l := len(buf)
- pos := 0
- for pos < l {
- // If in a group parsing, move to the next tag.
- fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
- if err != nil {
- return err
- }
- switch fieldNum {
-
- case 1:
- if wireType != proto.WireTypeI64 {
- return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeI64(buf, pos)
- if err != nil {
- return err
- }
-
- orig.Quantile = math.Float64frombits(num)
-
- case 2:
- if wireType != proto.WireTypeI64 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var num uint64
- num, pos, err = proto.ConsumeI64(buf, pos)
- if err != nil {
- return err
- }
-
- orig.Value = math.Float64frombits(num)
- default:
- pos, err = proto.ConsumeUnknown(buf, pos, wireType)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantileslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantileslice.go
deleted file mode 100644
index e8341dee4..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapoint_valueatquantileslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigSummaryDataPoint_ValueAtQuantileSlice(dest, src []*otlpmetrics.SummaryDataPoint_ValueAtQuantile) []*otlpmetrics.SummaryDataPoint_ValueAtQuantile {
- var newDest []*otlpmetrics.SummaryDataPoint_ValueAtQuantile
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSummaryDataPoint_ValueAtQuantile()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigSummaryDataPoint_ValueAtQuantile(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSummaryDataPoint_ValueAtQuantile()
- }
- }
- for i := range src {
- CopyOrigSummaryDataPoint_ValueAtQuantile(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestSummaryDataPoint_ValueAtQuantileSlice() []*otlpmetrics.SummaryDataPoint_ValueAtQuantile {
- orig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, 5)
- orig[0] = NewOrigSummaryDataPoint_ValueAtQuantile()
- orig[1] = GenTestOrigSummaryDataPoint_ValueAtQuantile()
- orig[2] = NewOrigSummaryDataPoint_ValueAtQuantile()
- orig[3] = GenTestOrigSummaryDataPoint_ValueAtQuantile()
- orig[4] = NewOrigSummaryDataPoint_ValueAtQuantile()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapointslice.go
deleted file mode 100644
index 2da01f377..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_summarydatapointslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-func CopyOrigSummaryDataPointSlice(dest, src []*otlpmetrics.SummaryDataPoint) []*otlpmetrics.SummaryDataPoint {
- var newDest []*otlpmetrics.SummaryDataPoint
- if cap(dest) < len(src) {
- newDest = make([]*otlpmetrics.SummaryDataPoint, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSummaryDataPoint()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigSummaryDataPoint(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigSummaryDataPoint()
- }
- }
- for i := range src {
- CopyOrigSummaryDataPoint(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestSummaryDataPointSlice() []*otlpmetrics.SummaryDataPoint {
- orig := make([]*otlpmetrics.SummaryDataPoint, 5)
- orig[0] = NewOrigSummaryDataPoint()
- orig[1] = GenTestOrigSummaryDataPoint()
- orig[2] = NewOrigSummaryDataPoint()
- orig[3] = GenTestOrigSummaryDataPoint()
- orig[4] = NewOrigSummaryDataPoint()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go
index 954987d85..c20fd9d0e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go
@@ -6,32 +6,28 @@
package internal
-type UInt64Slice struct {
+type UInt64SliceWrapper struct {
orig *[]uint64
state *State
}
-func GetOrigUInt64Slice(ms UInt64Slice) *[]uint64 {
+func GetUInt64SliceOrig(ms UInt64SliceWrapper) *[]uint64 {
return ms.orig
}
-func GetUInt64SliceState(ms UInt64Slice) *State {
+func GetUInt64SliceState(ms UInt64SliceWrapper) *State {
return ms.state
}
-func NewUInt64Slice(orig *[]uint64, state *State) UInt64Slice {
- return UInt64Slice{orig: orig, state: state}
+func NewUInt64SliceWrapper(orig *[]uint64, state *State) UInt64SliceWrapper {
+ return UInt64SliceWrapper{orig: orig, state: state}
}
-func GenerateTestUInt64Slice() UInt64Slice {
- orig := GenerateOrigTestUint64Slice()
- return NewUInt64Slice(&orig, NewState())
+func GenTestUInt64SliceWrapper() UInt64SliceWrapper {
+ orig := []uint64{1, 2, 3}
+ return NewUInt64SliceWrapper(&orig, NewState())
}
-func CopyOrigUint64Slice(dst, src []uint64) []uint64 {
- return append(dst[:0], src...)
-}
-
-func GenerateOrigTestUint64Slice() []uint64 {
+func GenTestUint64Slice() []uint64 {
return []uint64{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetypeslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetypeslice.go
deleted file mode 100644
index 6b1973a71..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_valuetypeslice.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-func CopyOrigValueTypeSlice(dest, src []*otlpprofiles.ValueType) []*otlpprofiles.ValueType {
- var newDest []*otlpprofiles.ValueType
- if cap(dest) < len(src) {
- newDest = make([]*otlpprofiles.ValueType, len(src))
- // Copy old pointers to re-use.
- copy(newDest, dest)
- // Add new pointers for missing elements from len(dest) to len(srt).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigValueType()
- }
- } else {
- newDest = dest[:len(src)]
- // Cleanup the rest of the elements so GC can free the memory.
- // This can happen when len(src) < len(dest) < cap(dest).
- for i := len(src); i < len(dest); i++ {
- DeleteOrigValueType(dest[i], true)
- dest[i] = nil
- }
- // Add new pointers for missing elements.
- // This can happen when len(dest) < len(src) < cap(dest).
- for i := len(dest); i < len(src); i++ {
- newDest[i] = NewOrigValueType()
- }
- }
- for i := range src {
- CopyOrigValueType(newDest[i], src[i])
- }
- return newDest
-}
-
-func GenerateOrigTestValueTypeSlice() []*otlpprofiles.ValueType {
- orig := make([]*otlpprofiles.ValueType, 5)
- orig[0] = NewOrigValueType()
- orig[1] = GenTestOrigValueType()
- orig[2] = NewOrigValueType()
- orig[3] = GenTestOrigValueType()
- orig[4] = NewOrigValueType()
- return orig
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/ids.go b/vendor/go.opentelemetry.io/collector/pdata/internal/ids.go
deleted file mode 100644
index 59b9a9456..000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/ids.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/collector/pdata/internal"
-
-import (
- "go.opentelemetry.io/collector/pdata/internal/data"
- "go.opentelemetry.io/collector/pdata/internal/json"
-)
-
-func DeleteOrigTraceID(*data.TraceID, bool) {}
-
-func DeleteOrigSpanID(*data.SpanID, bool) {}
-
-func DeleteOrigProfileID(*data.ProfileID, bool) {}
-
-func GenTestOrigTraceID() *data.TraceID {
- id := data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})
- return &id
-}
-
-func GenTestOrigSpanID() *data.SpanID {
- id := data.SpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})
- return &id
-}
-
-func GenTestOrigProfileID() *data.ProfileID {
- id := data.ProfileID([16]byte{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
- return &id
-}
-
-func MarshalJSONOrigTraceID(id *data.TraceID, dest *json.Stream) {
- id.MarshalJSONStream(dest)
-}
-
-func MarshalJSONOrigSpanID(id *data.SpanID, dest *json.Stream) {
- id.MarshalJSONStream(dest)
-}
-
-func MarshalJSONOrigProfileID(id *data.ProfileID, dest *json.Stream) {
- id.MarshalJSONStream(dest)
-}
-
-func UnmarshalJSONOrigTraceID(id *data.TraceID, iter *json.Iterator) {
- id.UnmarshalJSONIter(iter)
-}
-
-func UnmarshalJSONOrigSpanID(id *data.SpanID, iter *json.Iterator) {
- id.UnmarshalJSONIter(iter)
-}
-
-func UnmarshalJSONOrigProfileID(id *data.ProfileID, iter *json.Iterator) {
- id.UnmarshalJSONIter(iter)
-}
-
-func SizeProtoOrigTraceID(id *data.TraceID) int {
- return id.Size()
-}
-
-func SizeProtoOrigSpanID(id *data.SpanID) int {
- return id.Size()
-}
-
-func SizeProtoOrigProfileID(id *data.ProfileID) int {
- return id.Size()
-}
-
-func MarshalProtoOrigTraceID(id *data.TraceID, buf []byte) int {
- size := id.Size()
- _, _ = id.MarshalTo(buf[len(buf)-size:])
- return size
-}
-
-func MarshalProtoOrigSpanID(id *data.SpanID, buf []byte) int {
- size := id.Size()
- _, _ = id.MarshalTo(buf[len(buf)-size:])
- return size
-}
-
-func MarshalProtoOrigProfileID(id *data.ProfileID, buf []byte) int {
- size := id.Size()
- _, _ = id.MarshalTo(buf[len(buf)-size:])
- return size
-}
-
-func UnmarshalProtoOrigTraceID(id *data.TraceID, buf []byte) error {
- return id.Unmarshal(buf)
-}
-
-func UnmarshalProtoOrigSpanID(id *data.SpanID, buf []byte) error {
- return id.Unmarshal(buf)
-}
-
-func UnmarshalProtoOrigProfileID(id *data.ProfileID, buf []byte) error {
- return id.Unmarshal(buf)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
index c0328a5b4..aad78de4e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
@@ -4,12 +4,12 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateLogs implements any translation needed due to deprecation in OTLP logs protocol.
// Any plog.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
-func MigrateLogs(rls []*otlplogs.ResourceLogs) {
+func MigrateLogs(rls []*internal.ResourceLogs) {
for _, rl := range rls {
if len(rl.ScopeLogs) == 0 {
rl.ScopeLogs = rl.DeprecatedScopeLogs
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go
index 9a7da1486..fb1776c4f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go
@@ -4,12 +4,12 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateMetrics implements any translation needed due to deprecation in OTLP metrics protocol.
// Any pmetric.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
-func MigrateMetrics(rms []*otlpmetrics.ResourceMetrics) {
+func MigrateMetrics(rms []*internal.ResourceMetrics) {
for _, rm := range rms {
if len(rm.ScopeMetrics) == 0 {
rm.ScopeMetrics = rm.DeprecatedScopeMetrics
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go
index 59c23cc67..5144e7c4d 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go
@@ -4,9 +4,9 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateProfiles implements any translation needed due to deprecation in OTLP profiles protocol.
// Any pprofile.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
-func MigrateProfiles(_ []*otlpprofiles.ResourceProfiles) {}
+func MigrateProfiles(_ []*internal.ResourceProfiles) {}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go
index 627881fc3..84f5deade 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go
@@ -4,12 +4,12 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateTraces implements any translation needed due to deprecation in OTLP traces protocol.
// Any ptrace.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
-func MigrateTraces(rss []*otlptrace.ResourceSpans) {
+func MigrateTraces(rss []*internal.ResourceSpans) {
for _, rs := range rss {
if len(rs.ScopeSpans) == 0 {
rs.ScopeSpans = rs.DeprecatedScopeSpans
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go
new file mode 100644
index 000000000..5a038fe08
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+ "encoding/hex"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const profileIDSize = 16
+
+var errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
+
+// ProfileID is a custom data type that is used for all profile_id fields in OTLP
+// Protobuf messages.
+type ProfileID [profileIDSize]byte
+
+func DeleteProfileID(*ProfileID, bool) {}
+
+func CopyProfileID(dest, src *ProfileID) {
+ *dest = *src
+}
+
+// IsEmpty returns true if id doesn't contain at least one non-zero byte.
+func (pid ProfileID) IsEmpty() bool {
+ return pid == [profileIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (pid ProfileID) SizeProto() int {
+ if pid.IsEmpty() {
+ return 0
+ }
+
+ return profileIDSize
+}
+
+// MarshalProto converts profile ID into a binary representation. Called by Protobuf serialization.
+func (pid ProfileID) MarshalProto(buf []byte) int {
+ if pid.IsEmpty() {
+ return 0
+ }
+
+ return copy(buf[len(buf)-profileIDSize:], pid[:])
+}
+
+// UnmarshalProto inflates this profile ID from binary representation. Called by Protobuf serialization.
+func (pid *ProfileID) UnmarshalProto(buf []byte) error {
+ if len(buf) == 0 {
+ *pid = [profileIDSize]byte{}
+ return nil
+ }
+
+ if len(buf) != profileIDSize {
+ return errUnmarshalProfileID
+ }
+
+ copy(pid[:], buf)
+ return nil
+}
+
+// MarshalJSON converts ProfileID into a hex string.
+//
+//nolint:govet
+func (pid ProfileID) MarshalJSON(dest *json.Stream) {
+ dest.WriteString(hex.EncodeToString(pid[:]))
+}
+
+// UnmarshalJSON decodes ProfileID from hex string.
+//
+//nolint:govet
+func (pid *ProfileID) UnmarshalJSON(iter *json.Iterator) {
+ *pid = [profileIDSize]byte{}
+ unmarshalJSON(pid[:], iter)
+}
+
+func GenTestProfileID() *ProfileID {
+ pid := ProfileID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
+ return &pid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go
new file mode 100644
index 000000000..9ec0f465f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+ "encoding/hex"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const spanIDSize = 8
+
+var errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
+
+// SpanID is a custom data type that is used for all span_id fields in OTLP
+// Protobuf messages.
+type SpanID [spanIDSize]byte
+
+func DeleteSpanID(*SpanID, bool) {}
+
+func CopySpanID(dest, src *SpanID) {
+ *dest = *src
+}
+
+// IsEmpty returns true if id doesn't contain at least one non-zero byte.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (sid SpanID) SizeProto() int {
+ if sid.IsEmpty() {
+ return 0
+ }
+ return spanIDSize
+}
+
+// MarshalProto converts span ID into a binary representation. Called by Protobuf serialization.
+func (sid SpanID) MarshalProto(buf []byte) int {
+ if sid.IsEmpty() {
+ return 0
+ }
+
+ return copy(buf[len(buf)-spanIDSize:], sid[:])
+}
+
+// UnmarshalProto inflates this span ID from binary representation. Called by Protobuf serialization.
+func (sid *SpanID) UnmarshalProto(data []byte) error {
+ if len(data) == 0 {
+ *sid = [spanIDSize]byte{}
+ return nil
+ }
+
+ if len(data) != spanIDSize {
+ return errUnmarshalSpanID
+ }
+
+ copy(sid[:], data)
+ return nil
+}
+
+// MarshalJSON converts SpanID into a hex string.
+//
+//nolint:govet
+func (sid SpanID) MarshalJSON(dest *json.Stream) {
+ dest.WriteString(hex.EncodeToString(sid[:]))
+}
+
+// UnmarshalJSON decodes SpanID from hex string.
+//
+//nolint:govet
+func (sid *SpanID) UnmarshalJSON(iter *json.Iterator) {
+ *sid = [spanIDSize]byte{}
+ unmarshalJSON(sid[:], iter)
+}
+
+func GenTestSpanID() *SpanID {
+ sid := SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
+ return &sid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/state.go b/vendor/go.opentelemetry.io/collector/pdata/internal/state.go
index 7596caa74..46f10722d 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/state.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/state.go
@@ -8,12 +8,13 @@ import (
"go.opentelemetry.io/collector/featuregate"
)
-var UseCustomProtoEncoding = featuregate.GlobalRegistry().MustRegister(
+var _ = featuregate.GlobalRegistry().MustRegister(
"pdata.useCustomProtoEncoding",
- featuregate.StageBeta,
+ featuregate.StageStable,
featuregate.WithRegisterDescription("When enabled, enable custom proto encoding. This is required step to enable featuregate pdata.useProtoPooling."),
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/13631"),
featuregate.WithRegisterFromVersion("v0.133.0"),
+ featuregate.WithRegisterToVersion("v0.137.0"),
)
var UseProtoPooling = featuregate.GlobalRegistry().MustRegister(
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go
new file mode 100644
index 000000000..fe54123de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+ "encoding/hex"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const traceIDSize = 16
+
+var errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
+
+// TraceID is a custom data type that is used for all trace_id fields in OTLP
+// Protobuf messages.
+type TraceID [traceIDSize]byte
+
+func DeleteTraceID(*TraceID, bool) {}
+
+func CopyTraceID(dest, src *TraceID) {
+ *dest = *src
+}
+
+// IsEmpty returns true if id doesn't contain at least one non-zero byte.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (tid TraceID) SizeProto() int {
+ if tid.IsEmpty() {
+ return 0
+ }
+
+ return traceIDSize
+}
+
+// MarshalProto converts trace ID into a binary representation. Called by Protobuf serialization.
+func (tid TraceID) MarshalProto(buf []byte) int {
+ if tid.IsEmpty() {
+ return 0
+ }
+
+ return copy(buf[len(buf)-traceIDSize:], tid[:])
+}
+
+// UnmarshalProto inflates this trace ID from binary representation. Called by Protobuf serialization.
+func (tid *TraceID) UnmarshalProto(buf []byte) error {
+ if len(buf) == 0 {
+ *tid = [traceIDSize]byte{}
+ return nil
+ }
+
+ if len(buf) != traceIDSize {
+ return errUnmarshalTraceID
+ }
+
+ copy(tid[:], buf)
+ return nil
+}
+
+// MarshalJSON converts TraceID into a hex string.
+//
+//nolint:govet
+func (tid TraceID) MarshalJSON(dest *json.Stream) {
+ dest.WriteString(hex.EncodeToString(tid[:]))
+}
+
+// UnmarshalJSON decodes TraceID from hex string.
+//
+//nolint:govet
+func (tid *TraceID) UnmarshalJSON(iter *json.Iterator) {
+	*tid = [traceIDSize]byte{}
+ unmarshalJSON(tid[:], iter)
+}
+
+func GenTestTraceID() *TraceID {
+ tid := TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
+ return &tid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
index 31b20766c..b47b50833 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
@@ -3,22 +3,17 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
// LogsToProto internal helper to convert Logs to protobuf representation.
-func LogsToProto(l Logs) otlplogs.LogsData {
- return otlplogs.LogsData{
+func LogsToProto(l LogsWrapper) LogsData {
+ return LogsData{
ResourceLogs: l.orig.ResourceLogs,
}
}
// LogsFromProto internal helper to convert protobuf representation to Logs.
// This function set exclusive state assuming that it's called only once per Logs.
-func LogsFromProto(orig otlplogs.LogsData) Logs {
- return NewLogs(&otlpcollectorlog.ExportLogsServiceRequest{
+func LogsFromProto(orig LogsData) LogsWrapper {
+ return NewLogsWrapper(&ExportLogsServiceRequest{
ResourceLogs: orig.ResourceLogs,
}, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
index 33453c0e6..c5c6ca7f6 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
@@ -3,28 +3,24 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type Map struct {
- orig *[]otlpcommon.KeyValue
+type MapWrapper struct {
+ orig *[]KeyValue
state *State
}
-func GetOrigMap(ms Map) *[]otlpcommon.KeyValue {
+func GetMapOrig(ms MapWrapper) *[]KeyValue {
return ms.orig
}
-func GetMapState(ms Map) *State {
+func GetMapState(ms MapWrapper) *State {
return ms.state
}
-func NewMap(orig *[]otlpcommon.KeyValue, state *State) Map {
- return Map{orig: orig, state: state}
+func NewMapWrapper(orig *[]KeyValue, state *State) MapWrapper {
+ return MapWrapper{orig: orig, state: state}
}
-func GenerateTestMap() Map {
- orig := GenerateOrigTestKeyValueSlice()
- return NewMap(&orig, NewState())
+func GenTestMapWrapper() MapWrapper {
+ orig := GenTestKeyValueSlice()
+ return NewMapWrapper(&orig, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
index ef684b284..3cb790395 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
@@ -3,22 +3,17 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
// MetricsToProto internal helper to convert Metrics to protobuf representation.
-func MetricsToProto(l Metrics) otlpmetrics.MetricsData {
- return otlpmetrics.MetricsData{
+func MetricsToProto(l MetricsWrapper) MetricsData {
+ return MetricsData{
ResourceMetrics: l.orig.ResourceMetrics,
}
}
// MetricsFromProto internal helper to convert protobuf representation to Metrics.
// This function set exclusive state assuming that it's called only once per Metrics.
-func MetricsFromProto(orig otlpmetrics.MetricsData) Metrics {
- return NewMetrics(&otlpcollectormetrics.ExportMetricsServiceRequest{
+func MetricsFromProto(orig MetricsData) MetricsWrapper {
+ return NewMetricsWrapper(&ExportMetricsServiceRequest{
ResourceMetrics: orig.ResourceMetrics,
}, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go
index add526356..1a56d9229 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go
@@ -3,14 +3,9 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
- otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
// ProfilesToProto internal helper to convert Profiles to protobuf representation.
-func ProfilesToProto(l Profiles) otlpprofile.ProfilesData {
- return otlpprofile.ProfilesData{
+func ProfilesToProto(l ProfilesWrapper) ProfilesData {
+ return ProfilesData{
ResourceProfiles: l.orig.ResourceProfiles,
Dictionary: l.orig.Dictionary,
}
@@ -18,8 +13,8 @@ func ProfilesToProto(l Profiles) otlpprofile.ProfilesData {
// ProfilesFromProto internal helper to convert protobuf representation to Profiles.
// This function set exclusive state assuming that it's called only once per Profiles.
-func ProfilesFromProto(orig otlpprofile.ProfilesData) Profiles {
- return NewProfiles(&otlpcollectorprofile.ExportProfilesServiceRequest{
+func ProfilesFromProto(orig ProfilesData) ProfilesWrapper {
+ return NewProfilesWrapper(&ExportProfilesServiceRequest{
ResourceProfiles: orig.ResourceProfiles,
Dictionary: orig.Dictionary,
}, NewState())
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go
index bdc7d93bb..758a1cb27 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go
@@ -3,22 +3,17 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
// TracesToProto internal helper to convert Traces to protobuf representation.
-func TracesToProto(l Traces) otlptrace.TracesData {
- return otlptrace.TracesData{
+func TracesToProto(l TracesWrapper) TracesData {
+ return TracesData{
ResourceSpans: l.orig.ResourceSpans,
}
}
// TracesFromProto internal helper to convert protobuf representation to Traces.
// This function set exclusive state assuming that it's called only once per Traces.
-func TracesFromProto(orig otlptrace.TracesData) Traces {
- return NewTraces(&otlpcollectortrace.ExportTraceServiceRequest{
+func TracesFromProto(orig TracesData) TracesWrapper {
+ return NewTracesWrapper(&ExportTraceServiceRequest{
ResourceSpans: orig.ResourceSpans,
}, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go
index ce3bfe232..56278b728 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go
@@ -3,41 +3,28 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- "go.opentelemetry.io/collector/pdata/internal/json"
-)
-
-type TraceState struct {
+type TraceStateWrapper struct {
orig *string
state *State
}
-func GetOrigTraceState(ms TraceState) *string {
+func GetTraceStateOrig(ms TraceStateWrapper) *string {
return ms.orig
}
-func GetTraceStateState(ms TraceState) *State {
+func GetTraceStateState(ms TraceStateWrapper) *State {
return ms.state
}
-func NewTraceState(orig *string, state *State) TraceState {
- return TraceState{orig: orig, state: state}
-}
-
-func GenerateTestTraceState() TraceState {
- return NewTraceState(GenTestOrigTraceState(), NewState())
-}
-
-// UnmarshalJSONOrigTraceState marshals all properties from the current struct to the destination stream.
-func UnmarshalJSONOrigTraceState(orig *string, iter *json.Iterator) {
- *orig = iter.ReadString()
+func NewTraceStateWrapper(orig *string, state *State) TraceStateWrapper {
+ return TraceStateWrapper{orig: orig, state: state}
}
-func CopyOrigTraceState(dest, src *string) {
- *dest = *src
+func GenTestTraceStateWrapper() TraceStateWrapper {
+ return NewTraceStateWrapper(GenTestTraceState(), NewState())
}
-func GenTestOrigTraceState() *string {
+func GenTestTraceState() *string {
orig := new(string)
*orig = "rojo=00f067aa0ba902b7"
return orig
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go
index 43839ae82..c0220c8f0 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go
@@ -3,72 +3,73 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type Value struct {
- orig *otlpcommon.AnyValue
+type ValueWrapper struct {
+ orig *AnyValue
state *State
}
-func GetOrigValue(ms Value) *otlpcommon.AnyValue {
+func GetValueOrig(ms ValueWrapper) *AnyValue {
return ms.orig
}
-func GetValueState(ms Value) *State {
+func GetValueState(ms ValueWrapper) *State {
return ms.state
}
-func NewValue(orig *otlpcommon.AnyValue, state *State) Value {
- return Value{orig: orig, state: state}
+func NewValueWrapper(orig *AnyValue, state *State) ValueWrapper {
+ return ValueWrapper{orig: orig, state: state}
+}
+
+func GenTestValueWrapper() ValueWrapper {
+ orig := GenTestAnyValue()
+ return NewValueWrapper(orig, NewState())
}
-func NewOrigAnyValueStringValue() *otlpcommon.AnyValue_StringValue {
+func NewAnyValueStringValue() *AnyValue_StringValue {
if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue_StringValue{}
+ return &AnyValue_StringValue{}
}
- return ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
+ return ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
}
-func NewOrigAnyValueIntValue() *otlpcommon.AnyValue_IntValue {
+func NewAnyValueIntValue() *AnyValue_IntValue {
if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue_IntValue{}
+ return &AnyValue_IntValue{}
}
- return ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
+ return ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
}
-func NewOrigAnyValueBoolValue() *otlpcommon.AnyValue_BoolValue {
+func NewAnyValueBoolValue() *AnyValue_BoolValue {
if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue_BoolValue{}
+ return &AnyValue_BoolValue{}
}
- return ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
+ return ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
}
-func NewOrigAnyValueDoubleValue() *otlpcommon.AnyValue_DoubleValue {
+func NewAnyValueDoubleValue() *AnyValue_DoubleValue {
if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue_DoubleValue{}
+ return &AnyValue_DoubleValue{}
}
- return ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
+ return ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
}
-func NewOrigAnyValueBytesValue() *otlpcommon.AnyValue_BytesValue {
+func NewAnyValueBytesValue() *AnyValue_BytesValue {
if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue_BytesValue{}
+ return &AnyValue_BytesValue{}
}
- return ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
+ return ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
}
-func NewOrigAnyValueArrayValue() *otlpcommon.AnyValue_ArrayValue {
+func NewAnyValueArrayValue() *AnyValue_ArrayValue {
if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue_ArrayValue{}
+ return &AnyValue_ArrayValue{}
}
- return ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
+ return ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
}
-func NewOrigAnyValueKvlistValue() *otlpcommon.AnyValue_KvlistValue {
+func NewAnyValueKvlistValue() *AnyValue_KvlistValue {
if !UseProtoPooling.IsEnabled() {
- return &otlpcommon.AnyValue_KvlistValue{}
+ return &AnyValue_KvlistValue{}
}
- return ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
+ return ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
index 777bec8d3..073b55d34 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
@@ -18,31 +18,31 @@ import (
//
// Must use NewByteSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type ByteSlice internal.ByteSlice
+type ByteSlice internal.ByteSliceWrapper
func (ms ByteSlice) getOrig() *[]byte {
- return internal.GetOrigByteSlice(internal.ByteSlice(ms))
+ return internal.GetByteSliceOrig(internal.ByteSliceWrapper(ms))
}
func (ms ByteSlice) getState() *internal.State {
- return internal.GetByteSliceState(internal.ByteSlice(ms))
+ return internal.GetByteSliceState(internal.ByteSliceWrapper(ms))
}
// NewByteSlice creates a new empty ByteSlice.
func NewByteSlice() ByteSlice {
orig := []byte(nil)
- return ByteSlice(internal.NewByteSlice(&orig, internal.NewState()))
+ return ByteSlice(internal.NewByteSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []byte slice.
func (ms ByteSlice) AsRaw() []byte {
- return internal.CopyOrigByteSlice(nil, *ms.getOrig())
+ return copyByteSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []byte into the slice ByteSlice.
func (ms ByteSlice) FromRaw(val []byte) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigByteSlice(*ms.getOrig(), val)
+ *ms.getOrig() = copyByteSlice(*ms.getOrig(), val)
}
// Len returns length of the []byte slice value.
@@ -127,16 +127,42 @@ func (ms ByteSlice) MoveAndAppendTo(dest ByteSlice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms ByteSlice) RemoveIf(f func(byte) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero byte
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms ByteSlice) CopyTo(dest ByteSlice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = internal.CopyOrigByteSlice(*dest.getOrig(), *ms.getOrig())
+ *dest.getOrig() = copyByteSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another ByteSlice
func (ms ByteSlice) Equal(val ByteSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyByteSlice(dst, src []byte) []byte {
+ return append(dst[:0], src...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
index b7df94894..dac9ebc4e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
@@ -18,31 +18,31 @@ import (
//
// Must use NewFloat64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Float64Slice internal.Float64Slice
+type Float64Slice internal.Float64SliceWrapper
func (ms Float64Slice) getOrig() *[]float64 {
- return internal.GetOrigFloat64Slice(internal.Float64Slice(ms))
+ return internal.GetFloat64SliceOrig(internal.Float64SliceWrapper(ms))
}
func (ms Float64Slice) getState() *internal.State {
- return internal.GetFloat64SliceState(internal.Float64Slice(ms))
+ return internal.GetFloat64SliceState(internal.Float64SliceWrapper(ms))
}
// NewFloat64Slice creates a new empty Float64Slice.
func NewFloat64Slice() Float64Slice {
orig := []float64(nil)
- return Float64Slice(internal.NewFloat64Slice(&orig, internal.NewState()))
+ return Float64Slice(internal.NewFloat64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []float64 slice.
func (ms Float64Slice) AsRaw() []float64 {
- return internal.CopyOrigFloat64Slice(nil, *ms.getOrig())
+ return copyFloat64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []float64 into the slice Float64Slice.
func (ms Float64Slice) FromRaw(val []float64) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigFloat64Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyFloat64Slice(*ms.getOrig(), val)
}
// Len returns length of the []float64 slice value.
@@ -127,16 +127,42 @@ func (ms Float64Slice) MoveAndAppendTo(dest Float64Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms Float64Slice) RemoveIf(f func(float64) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero float64
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Float64Slice) CopyTo(dest Float64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = internal.CopyOrigFloat64Slice(*dest.getOrig(), *ms.getOrig())
+ *dest.getOrig() = copyFloat64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Float64Slice
func (ms Float64Slice) Equal(val Float64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyFloat64Slice(dst, src []float64) []float64 {
+ return append(dst[:0], src...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go
index b4385a088..6eeeab6e3 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go
@@ -8,7 +8,6 @@ package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// InstrumentationScope is a message representing the instrumentation scope information.
@@ -18,10 +17,10 @@ import (
//
// Must use NewInstrumentationScope function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type InstrumentationScope internal.InstrumentationScope
+type InstrumentationScope internal.InstrumentationScopeWrapper
-func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *internal.State) InstrumentationScope {
- return InstrumentationScope(internal.NewInstrumentationScope(orig, state))
+func newInstrumentationScope(orig *internal.InstrumentationScope, state *internal.State) InstrumentationScope {
+ return InstrumentationScope(internal.NewInstrumentationScopeWrapper(orig, state))
}
// NewInstrumentationScope creates a new empty InstrumentationScope.
@@ -29,7 +28,7 @@ func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *inter
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewInstrumentationScope() InstrumentationScope {
- return newInstrumentationScope(internal.NewOrigInstrumentationScope(), internal.NewState())
+ return newInstrumentationScope(internal.NewInstrumentationScope(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -41,7 +40,7 @@ func (ms InstrumentationScope) MoveTo(dest InstrumentationScope) {
if ms.getOrig() == dest.getOrig() {
return
}
- internal.DeleteOrigInstrumentationScope(dest.getOrig(), false)
+ internal.DeleteInstrumentationScope(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
@@ -69,7 +68,7 @@ func (ms InstrumentationScope) SetVersion(v string) {
// Attributes returns the Attributes associated with this InstrumentationScope.
func (ms InstrumentationScope) Attributes() Map {
- return Map(internal.NewMap(&ms.getOrig().Attributes, ms.getState()))
+ return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this InstrumentationScope.
@@ -86,13 +85,13 @@ func (ms InstrumentationScope) SetDroppedAttributesCount(v uint32) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms InstrumentationScope) CopyTo(dest InstrumentationScope) {
dest.getState().AssertMutable()
- internal.CopyOrigInstrumentationScope(dest.getOrig(), ms.getOrig())
+ internal.CopyInstrumentationScope(dest.getOrig(), ms.getOrig())
}
-func (ms InstrumentationScope) getOrig() *otlpcommon.InstrumentationScope {
- return internal.GetOrigInstrumentationScope(internal.InstrumentationScope(ms))
+func (ms InstrumentationScope) getOrig() *internal.InstrumentationScope {
+ return internal.GetInstrumentationScopeOrig(internal.InstrumentationScopeWrapper(ms))
}
func (ms InstrumentationScope) getState() *internal.State {
- return internal.GetInstrumentationScopeState(internal.InstrumentationScope(ms))
+ return internal.GetInstrumentationScopeState(internal.InstrumentationScopeWrapper(ms))
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
index 9f0ac608c..d2bb746df 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
@@ -18,31 +18,31 @@ import (
//
// Must use NewInt32Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Int32Slice internal.Int32Slice
+type Int32Slice internal.Int32SliceWrapper
func (ms Int32Slice) getOrig() *[]int32 {
- return internal.GetOrigInt32Slice(internal.Int32Slice(ms))
+ return internal.GetInt32SliceOrig(internal.Int32SliceWrapper(ms))
}
func (ms Int32Slice) getState() *internal.State {
- return internal.GetInt32SliceState(internal.Int32Slice(ms))
+ return internal.GetInt32SliceState(internal.Int32SliceWrapper(ms))
}
// NewInt32Slice creates a new empty Int32Slice.
func NewInt32Slice() Int32Slice {
orig := []int32(nil)
- return Int32Slice(internal.NewInt32Slice(&orig, internal.NewState()))
+ return Int32Slice(internal.NewInt32SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int32 slice.
func (ms Int32Slice) AsRaw() []int32 {
- return internal.CopyOrigInt32Slice(nil, *ms.getOrig())
+ return copyInt32Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int32 into the slice Int32Slice.
func (ms Int32Slice) FromRaw(val []int32) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigInt32Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyInt32Slice(*ms.getOrig(), val)
}
// Len returns length of the []int32 slice value.
@@ -127,16 +127,42 @@ func (ms Int32Slice) MoveAndAppendTo(dest Int32Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms Int32Slice) RemoveIf(f func(int32) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero int32
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int32Slice) CopyTo(dest Int32Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = internal.CopyOrigInt32Slice(*dest.getOrig(), *ms.getOrig())
+ *dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int32Slice
func (ms Int32Slice) Equal(val Int32Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyInt32Slice(dst, src []int32) []int32 {
+ return append(dst[:0], src...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
index 8a2607bf3..4c22f2ed8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
@@ -18,31 +18,31 @@ import (
//
// Must use NewInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Int64Slice internal.Int64Slice
+type Int64Slice internal.Int64SliceWrapper
func (ms Int64Slice) getOrig() *[]int64 {
- return internal.GetOrigInt64Slice(internal.Int64Slice(ms))
+ return internal.GetInt64SliceOrig(internal.Int64SliceWrapper(ms))
}
func (ms Int64Slice) getState() *internal.State {
- return internal.GetInt64SliceState(internal.Int64Slice(ms))
+ return internal.GetInt64SliceState(internal.Int64SliceWrapper(ms))
}
// NewInt64Slice creates a new empty Int64Slice.
func NewInt64Slice() Int64Slice {
orig := []int64(nil)
- return Int64Slice(internal.NewInt64Slice(&orig, internal.NewState()))
+ return Int64Slice(internal.NewInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int64 slice.
func (ms Int64Slice) AsRaw() []int64 {
- return internal.CopyOrigInt64Slice(nil, *ms.getOrig())
+ return copyInt64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int64 into the slice Int64Slice.
func (ms Int64Slice) FromRaw(val []int64) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigInt64Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyInt64Slice(*ms.getOrig(), val)
}
// Len returns length of the []int64 slice value.
@@ -127,16 +127,42 @@ func (ms Int64Slice) MoveAndAppendTo(dest Int64Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms Int64Slice) RemoveIf(f func(int64) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero int64
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int64Slice) CopyTo(dest Int64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = internal.CopyOrigInt64Slice(*dest.getOrig(), *ms.getOrig())
+ *dest.getOrig() = copyInt64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int64Slice
func (ms Int64Slice) Equal(val Int64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyInt64Slice(dst, src []int64) []int64 {
+ return append(dst[:0], src...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go
index 66cd5156d..4f767693b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go
@@ -8,7 +8,6 @@ package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
)
// Resource is a message representing the resource information.
@@ -18,10 +17,10 @@ import (
//
// Must use NewResource function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Resource internal.Resource
+type Resource internal.ResourceWrapper
-func newResource(orig *otlpresource.Resource, state *internal.State) Resource {
- return Resource(internal.NewResource(orig, state))
+func newResource(orig *internal.Resource, state *internal.State) Resource {
+ return Resource(internal.NewResourceWrapper(orig, state))
}
// NewResource creates a new empty Resource.
@@ -29,7 +28,7 @@ func newResource(orig *otlpresource.Resource, state *internal.State) Resource {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResource() Resource {
- return newResource(internal.NewOrigResource(), internal.NewState())
+ return newResource(internal.NewResource(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -41,13 +40,13 @@ func (ms Resource) MoveTo(dest Resource) {
if ms.getOrig() == dest.getOrig() {
return
}
- internal.DeleteOrigResource(dest.getOrig(), false)
+ internal.DeleteResource(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// Attributes returns the Attributes associated with this Resource.
func (ms Resource) Attributes() Map {
- return Map(internal.NewMap(&ms.getOrig().Attributes, ms.getState()))
+ return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Resource.
@@ -64,13 +63,13 @@ func (ms Resource) SetDroppedAttributesCount(v uint32) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Resource) CopyTo(dest Resource) {
dest.getState().AssertMutable()
- internal.CopyOrigResource(dest.getOrig(), ms.getOrig())
+ internal.CopyResource(dest.getOrig(), ms.getOrig())
}
-func (ms Resource) getOrig() *otlpresource.Resource {
- return internal.GetOrigResource(internal.Resource(ms))
+func (ms Resource) getOrig() *internal.Resource {
+ return internal.GetResourceOrig(internal.ResourceWrapper(ms))
}
func (ms Resource) getState() *internal.State {
- return internal.GetResourceState(internal.Resource(ms))
+ return internal.GetResourceState(internal.ResourceWrapper(ms))
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go
index c60caaa33..0a28bde3a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go
@@ -10,7 +10,6 @@ import (
"iter"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// Slice logically represents a slice of Value.
@@ -20,16 +19,16 @@ import (
//
// Must use NewSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Slice internal.Slice
+type Slice internal.SliceWrapper
-func newSlice(orig *[]otlpcommon.AnyValue, state *internal.State) Slice {
- return Slice(internal.NewSlice(orig, state))
+func newSlice(orig *[]internal.AnyValue, state *internal.State) Slice {
+ return Slice(internal.NewSliceWrapper(orig, state))
}
-// NewSlice creates a Slice with 0 elements.
+// NewSlice creates a SliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSlice() Slice {
- orig := []otlpcommon.AnyValue(nil)
+ orig := []internal.AnyValue(nil)
return newSlice(&orig, internal.NewState())
}
@@ -86,7 +85,7 @@ func (es Slice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]otlpcommon.AnyValue, len(*es.getOrig()), newCap)
+ newOrig := make([]internal.AnyValue, len(*es.getOrig()), newCap)
copy(newOrig, *es.getOrig())
*es.getOrig() = newOrig
}
@@ -95,7 +94,7 @@ func (es Slice) EnsureCapacity(newCap int) {
// It returns the newly added Value.
func (es Slice) AppendEmpty() Value {
es.getState().AssertMutable()
- *es.getOrig() = append(*es.getOrig(), otlpcommon.AnyValue{})
+ *es.getOrig() = append(*es.getOrig(), internal.AnyValue{})
return es.At(es.Len() - 1)
}
@@ -124,7 +123,7 @@ func (es Slice) RemoveIf(f func(Value) bool) {
newLen := 0
for i := 0; i < len(*es.getOrig()); i++ {
if f(es.At(i)) {
- internal.DeleteOrigAnyValue(&(*es.getOrig())[i], false)
+ internal.DeleteAnyValue(&(*es.getOrig())[i], false)
continue
}
if newLen == i {
@@ -145,13 +144,13 @@ func (es Slice) CopyTo(dest Slice) {
if es.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = internal.CopyOrigAnyValueSlice(*dest.getOrig(), *es.getOrig())
+ *dest.getOrig() = internal.CopyAnyValueSlice(*dest.getOrig(), *es.getOrig())
}
-func (ms Slice) getOrig() *[]otlpcommon.AnyValue {
- return internal.GetOrigSlice(internal.Slice(ms))
+func (ms Slice) getOrig() *[]internal.AnyValue {
+ return internal.GetSliceOrig(internal.SliceWrapper(ms))
}
func (ms Slice) getState() *internal.State {
- return internal.GetSliceState(internal.Slice(ms))
+ return internal.GetSliceState(internal.SliceWrapper(ms))
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
index 685f51af9..ff8422805 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
@@ -18,31 +18,31 @@ import (
//
// Must use NewStringSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type StringSlice internal.StringSlice
+type StringSlice internal.StringSliceWrapper
func (ms StringSlice) getOrig() *[]string {
- return internal.GetOrigStringSlice(internal.StringSlice(ms))
+ return internal.GetStringSliceOrig(internal.StringSliceWrapper(ms))
}
func (ms StringSlice) getState() *internal.State {
- return internal.GetStringSliceState(internal.StringSlice(ms))
+ return internal.GetStringSliceState(internal.StringSliceWrapper(ms))
}
// NewStringSlice creates a new empty StringSlice.
func NewStringSlice() StringSlice {
orig := []string(nil)
- return StringSlice(internal.NewStringSlice(&orig, internal.NewState()))
+ return StringSlice(internal.NewStringSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []string slice.
func (ms StringSlice) AsRaw() []string {
- return internal.CopyOrigStringSlice(nil, *ms.getOrig())
+ return copyStringSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []string into the slice StringSlice.
func (ms StringSlice) FromRaw(val []string) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigStringSlice(*ms.getOrig(), val)
+ *ms.getOrig() = copyStringSlice(*ms.getOrig(), val)
}
// Len returns length of the []string slice value.
@@ -127,16 +127,42 @@ func (ms StringSlice) MoveAndAppendTo(dest StringSlice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms StringSlice) RemoveIf(f func(string) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero string
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms StringSlice) CopyTo(dest StringSlice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = internal.CopyOrigStringSlice(*dest.getOrig(), *ms.getOrig())
+ *dest.getOrig() = copyStringSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another StringSlice
func (ms StringSlice) Equal(val StringSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyStringSlice(dst, src []string) []string {
+ return append(dst[:0], src...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
index 3a76ab3ba..22539d987 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
@@ -18,31 +18,31 @@ import (
//
// Must use NewUInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type UInt64Slice internal.UInt64Slice
+type UInt64Slice internal.UInt64SliceWrapper
func (ms UInt64Slice) getOrig() *[]uint64 {
- return internal.GetOrigUInt64Slice(internal.UInt64Slice(ms))
+ return internal.GetUInt64SliceOrig(internal.UInt64SliceWrapper(ms))
}
func (ms UInt64Slice) getState() *internal.State {
- return internal.GetUInt64SliceState(internal.UInt64Slice(ms))
+ return internal.GetUInt64SliceState(internal.UInt64SliceWrapper(ms))
}
// NewUInt64Slice creates a new empty UInt64Slice.
func NewUInt64Slice() UInt64Slice {
orig := []uint64(nil)
- return UInt64Slice(internal.NewUInt64Slice(&orig, internal.NewState()))
+ return UInt64Slice(internal.NewUInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []uint64 slice.
func (ms UInt64Slice) AsRaw() []uint64 {
- return internal.CopyOrigUint64Slice(nil, *ms.getOrig())
+ return copyUint64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []uint64 into the slice UInt64Slice.
func (ms UInt64Slice) FromRaw(val []uint64) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigUint64Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyUint64Slice(*ms.getOrig(), val)
}
// Len returns length of the []uint64 slice value.
@@ -127,16 +127,42 @@ func (ms UInt64Slice) MoveAndAppendTo(dest UInt64Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms UInt64Slice) RemoveIf(f func(uint64) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero uint64
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms UInt64Slice) CopyTo(dest UInt64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = internal.CopyOrigUint64Slice(*dest.getOrig(), *ms.getOrig())
+ *dest.getOrig() = copyUint64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another UInt64Slice
func (ms UInt64Slice) Equal(val UInt64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyUint64Slice(dst, src []uint64) []uint64 {
+ return append(dst[:0], src...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
index d23f53a03..b5d94967c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
@@ -9,31 +9,30 @@ import (
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// Map stores a map of string keys to elements of Value type.
//
// Must use NewMap function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Map internal.Map
+type Map internal.MapWrapper
// NewMap creates a Map with 0 elements.
func NewMap() Map {
- orig := []otlpcommon.KeyValue(nil)
- return Map(internal.NewMap(&orig, internal.NewState()))
+ orig := []internal.KeyValue(nil)
+ return Map(internal.NewMapWrapper(&orig, internal.NewState()))
}
-func (m Map) getOrig() *[]otlpcommon.KeyValue {
- return internal.GetOrigMap(internal.Map(m))
+func (m Map) getOrig() *[]internal.KeyValue {
+ return internal.GetMapOrig(internal.MapWrapper(m))
}
func (m Map) getState() *internal.State {
- return internal.GetMapState(internal.Map(m))
+ return internal.GetMapState(internal.MapWrapper(m))
}
-func newMap(orig *[]otlpcommon.KeyValue, state *internal.State) Map {
- return Map(internal.NewMap(orig, state))
+func newMap(orig *[]internal.KeyValue, state *internal.State) Map {
+ return Map(internal.NewMapWrapper(orig, state))
}
// Clear erases any existing entries in this Map instance.
@@ -50,7 +49,7 @@ func (m Map) EnsureCapacity(capacity int) {
if capacity <= cap(oldOrig) {
return
}
- *m.getOrig() = make([]otlpcommon.KeyValue, len(oldOrig), capacity)
+ *m.getOrig() = make([]internal.KeyValue, len(oldOrig), capacity)
copy(*m.getOrig(), oldOrig)
}
@@ -94,7 +93,7 @@ func (m Map) RemoveIf(f func(string, Value) bool) {
newLen := 0
for i := 0; i < len(*m.getOrig()); i++ {
if f((*m.getOrig())[i].Key, newValue(&(*m.getOrig())[i].Value, m.getState())) {
- (*m.getOrig())[i] = otlpcommon.KeyValue{}
+ (*m.getOrig())[i] = internal.KeyValue{}
continue
}
if newLen == i {
@@ -103,7 +102,7 @@ func (m Map) RemoveIf(f func(string, Value) bool) {
continue
}
(*m.getOrig())[newLen] = (*m.getOrig())[i]
- (*m.getOrig())[i] = otlpcommon.KeyValue{}
+ (*m.getOrig())[i] = internal.KeyValue{}
newLen++
}
*m.getOrig() = (*m.getOrig())[:newLen]
@@ -117,10 +116,22 @@ func (m Map) PutEmpty(k string) Value {
av.getOrig().Value = nil
return newValue(av.getOrig(), m.getState())
}
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k})
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k})
return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState())
}
+// GetOrPutEmpty returns the Value associated with the key and true (loaded) if the key exists in the map,
+// otherwise inserts an empty value to the map under the given key and returns the inserted value
+// and false (loaded).
+func (m Map) GetOrPutEmpty(k string) (Value, bool) {
+ m.getState().AssertMutable()
+ if av, existing := m.Get(k); existing {
+ return av, true
+ }
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k})
+ return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState()), false
+}
+
// PutStr performs the Insert or Update action. The Value is
// inserted to the map that did not originally have the key. The key/value is
// updated to the map where the key already existed.
@@ -130,9 +141,9 @@ func (m Map) PutStr(k, v string) {
av.SetStr(v)
return
}
- ov := internal.NewOrigAnyValueStringValue()
+ ov := internal.NewAnyValueStringValue()
ov.StringValue = v
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutInt performs the Insert or Update action. The int Value is
@@ -144,9 +155,9 @@ func (m Map) PutInt(k string, v int64) {
av.SetInt(v)
return
}
- ov := internal.NewOrigAnyValueIntValue()
+ ov := internal.NewAnyValueIntValue()
ov.IntValue = v
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutDouble performs the Insert or Update action. The double Value is
@@ -158,9 +169,9 @@ func (m Map) PutDouble(k string, v float64) {
av.SetDouble(v)
return
}
- ov := internal.NewOrigAnyValueDoubleValue()
+ ov := internal.NewAnyValueDoubleValue()
ov.DoubleValue = v
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutBool performs the Insert or Update action. The bool Value is
@@ -172,9 +183,9 @@ func (m Map) PutBool(k string, v bool) {
av.SetBool(v)
return
}
- ov := internal.NewOrigAnyValueBoolValue()
+ ov := internal.NewAnyValueBoolValue()
ov.BoolValue = v
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutEmptyBytes inserts or updates an empty byte slice under given key and returns it.
@@ -183,9 +194,9 @@ func (m Map) PutEmptyBytes(k string) ByteSlice {
if av, existing := m.Get(k); existing {
return av.SetEmptyBytes()
}
- ov := internal.NewOrigAnyValueBytesValue()
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
- return ByteSlice(internal.NewByteSlice(&ov.BytesValue, m.getState()))
+ ov := internal.NewAnyValueBytesValue()
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
+ return ByteSlice(internal.NewByteSliceWrapper(&ov.BytesValue, m.getState()))
}
// PutEmptyMap inserts or updates an empty map under given key and returns it.
@@ -194,10 +205,10 @@ func (m Map) PutEmptyMap(k string) Map {
if av, existing := m.Get(k); existing {
return av.SetEmptyMap()
}
- ov := internal.NewOrigAnyValueKvlistValue()
- ov.KvlistValue = internal.NewOrigKeyValueList()
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
- return Map(internal.NewMap(&ov.KvlistValue.Values, m.getState()))
+ ov := internal.NewAnyValueKvlistValue()
+ ov.KvlistValue = internal.NewKeyValueList()
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
+ return Map(internal.NewMapWrapper(&ov.KvlistValue.Values, m.getState()))
}
// PutEmptySlice inserts or updates an empty slice under given key and returns it.
@@ -206,10 +217,10 @@ func (m Map) PutEmptySlice(k string) Slice {
if av, existing := m.Get(k); existing {
return av.SetEmptySlice()
}
- ov := internal.NewOrigAnyValueArrayValue()
- ov.ArrayValue = internal.NewOrigArrayValue()
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
- return Slice(internal.NewSlice(&ov.ArrayValue.Values, m.getState()))
+ ov := internal.NewAnyValueArrayValue()
+ ov.ArrayValue = internal.NewArrayValue()
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
+ return Slice(internal.NewSliceWrapper(&ov.ArrayValue.Values, m.getState()))
}
// Len returns the length of this map.
@@ -230,7 +241,7 @@ func (m Map) Len() int {
func (m Map) Range(f func(k string, v Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
- if !f(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) {
+ if !f(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
break
}
}
@@ -245,7 +256,7 @@ func (m Map) All() iter.Seq2[string, Value] {
return func(yield func(string, Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
- if !yield(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) {
+ if !yield(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
return
}
}
@@ -268,7 +279,10 @@ func (m Map) MoveTo(dest Map) {
// CopyTo copies all elements from the current map overriding the destination.
func (m Map) CopyTo(dest Map) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigKeyValueSlice(*dest.getOrig(), *m.getOrig())
+ if m.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = internal.CopyKeyValueSlice(*dest.getOrig(), *m.getOrig())
}
// AsRaw returns a standard go map representation of this Map.
@@ -290,7 +304,7 @@ func (m Map) FromRaw(rawMap map[string]any) error {
}
var errs error
- origs := make([]otlpcommon.KeyValue, len(rawMap))
+ origs := make([]internal.KeyValue, len(rawMap))
ix := 0
for k, iv := range rawMap {
origs[ix].Key = k
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
index d4276a1d8..380fc5c1b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
@@ -6,7 +6,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"go.uber.org/multierr"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// AsRaw return []any copy of the Slice.
@@ -26,7 +26,7 @@ func (es Slice) FromRaw(rawSlice []any) error {
return nil
}
var errs error
- origs := make([]otlpcommon.AnyValue, len(rawSlice))
+ origs := make([]internal.AnyValue, len(rawSlice))
for ix, iv := range rawSlice {
errs = multierr.Append(errs, newValue(&origs[ix], es.getState()).FromRaw(iv))
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go
index 63399cb58..853c1a2d0 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go
@@ -5,7 +5,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
- "go.opentelemetry.io/collector/pdata/internal/data"
+ "go.opentelemetry.io/collector/pdata/internal"
)
var emptySpanID = SpanID([8]byte{})
@@ -32,5 +32,5 @@ func (ms SpanID) String() string {
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (ms SpanID) IsEmpty() bool {
- return data.SpanID(ms).IsEmpty()
+ return internal.SpanID(ms).IsEmpty()
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go
index a78f4ff0c..167f7a327 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go
@@ -11,18 +11,18 @@ import (
//
// Must use NewTraceState function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type TraceState internal.TraceState
+type TraceState internal.TraceStateWrapper
func NewTraceState() TraceState {
- return TraceState(internal.NewTraceState(new(string), internal.NewState()))
+ return TraceState(internal.NewTraceStateWrapper(new(string), internal.NewState()))
}
func (ms TraceState) getOrig() *string {
- return internal.GetOrigTraceState(internal.TraceState(ms))
+ return internal.GetTraceStateOrig(internal.TraceStateWrapper(ms))
}
func (ms TraceState) getState() *internal.State {
- return internal.GetTraceStateState(internal.TraceState(ms))
+ return internal.GetTraceStateState(internal.TraceStateWrapper(ms))
}
// AsRaw returns the string representation of the tracestate in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go
index 22ad5a5af..fd1df45d2 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go
@@ -6,7 +6,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
- "go.opentelemetry.io/collector/pdata/internal/data"
+ "go.opentelemetry.io/collector/pdata/internal"
)
var emptyTraceID = TraceID([16]byte{})
@@ -33,5 +33,5 @@ func (ms TraceID) String() string {
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (ms TraceID) IsEmpty() bool {
- return data.TraceID(ms).IsEmpty()
+ return internal.TraceID(ms).IsEmpty()
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
index ad16e6173..f74fa7946 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
@@ -11,7 +11,6 @@ import (
"strconv"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// ValueType specifies the type of Value.
@@ -67,85 +66,85 @@ func (avt ValueType) String() string {
//
// Important: zero-initialized instance is not valid for use. All Value functions below must
// be called only on instances that are created via NewValue+ functions.
-type Value internal.Value
+type Value internal.ValueWrapper
// NewValueEmpty creates a new Value with an empty value.
func NewValueEmpty() Value {
- return newValue(&otlpcommon.AnyValue{}, internal.NewState())
+ return newValue(&internal.AnyValue{}, internal.NewState())
}
// NewValueStr creates a new Value with the given string value.
func NewValueStr(v string) Value {
- ov := internal.NewOrigAnyValueStringValue()
+ ov := internal.NewAnyValueStringValue()
ov.StringValue = v
- orig := internal.NewOrigAnyValue()
+ orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueInt creates a new Value with the given int64 value.
func NewValueInt(v int64) Value {
- ov := internal.NewOrigAnyValueIntValue()
+ ov := internal.NewAnyValueIntValue()
ov.IntValue = v
- orig := internal.NewOrigAnyValue()
+ orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueDouble creates a new Value with the given float64 value.
func NewValueDouble(v float64) Value {
- ov := internal.NewOrigAnyValueDoubleValue()
+ ov := internal.NewAnyValueDoubleValue()
ov.DoubleValue = v
- orig := internal.NewOrigAnyValue()
+ orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueBool creates a new Value with the given bool value.
func NewValueBool(v bool) Value {
- ov := internal.NewOrigAnyValueBoolValue()
+ ov := internal.NewAnyValueBoolValue()
ov.BoolValue = v
- orig := internal.NewOrigAnyValue()
+ orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueMap creates a new Value of map type.
func NewValueMap() Value {
- ov := internal.NewOrigAnyValueKvlistValue()
- ov.KvlistValue = internal.NewOrigKeyValueList()
- orig := internal.NewOrigAnyValue()
+ ov := internal.NewAnyValueKvlistValue()
+ ov.KvlistValue = internal.NewKeyValueList()
+ orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueSlice creates a new Value of array type.
func NewValueSlice() Value {
- ov := internal.NewOrigAnyValueArrayValue()
- ov.ArrayValue = internal.NewOrigArrayValue()
- orig := internal.NewOrigAnyValue()
+ ov := internal.NewAnyValueArrayValue()
+ ov.ArrayValue = internal.NewArrayValue()
+ orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueBytes creates a new empty Value of byte type.
func NewValueBytes() Value {
- ov := internal.NewOrigAnyValueBytesValue()
- orig := internal.NewOrigAnyValue()
+ ov := internal.NewAnyValueBytesValue()
+ orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
-func newValue(orig *otlpcommon.AnyValue, state *internal.State) Value {
- return Value(internal.NewValue(orig, state))
+func newValue(orig *internal.AnyValue, state *internal.State) Value {
+ return Value(internal.NewValueWrapper(orig, state))
}
-func (v Value) getOrig() *otlpcommon.AnyValue {
- return internal.GetOrigValue(internal.Value(v))
+func (v Value) getOrig() *internal.AnyValue {
+ return internal.GetValueOrig(internal.ValueWrapper(v))
}
func (v Value) getState() *internal.State {
- return internal.GetValueState(internal.Value(v))
+ return internal.GetValueState(internal.ValueWrapper(v))
}
// FromRaw sets the value from the given raw value.
@@ -200,19 +199,19 @@ func (v Value) FromRaw(iv any) error {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) Type() ValueType {
switch v.getOrig().Value.(type) {
- case *otlpcommon.AnyValue_StringValue:
+ case *internal.AnyValue_StringValue:
return ValueTypeStr
- case *otlpcommon.AnyValue_BoolValue:
+ case *internal.AnyValue_BoolValue:
return ValueTypeBool
- case *otlpcommon.AnyValue_IntValue:
+ case *internal.AnyValue_IntValue:
return ValueTypeInt
- case *otlpcommon.AnyValue_DoubleValue:
+ case *internal.AnyValue_DoubleValue:
return ValueTypeDouble
- case *otlpcommon.AnyValue_KvlistValue:
+ case *internal.AnyValue_KvlistValue:
return ValueTypeMap
- case *otlpcommon.AnyValue_ArrayValue:
+ case *internal.AnyValue_ArrayValue:
return ValueTypeSlice
- case *otlpcommon.AnyValue_BytesValue:
+ case *internal.AnyValue_BytesValue:
return ValueTypeBytes
}
return ValueTypeEmpty
@@ -251,7 +250,7 @@ func (v Value) Map() Map {
if kvlist == nil {
return Map{}
}
- return newMap(&kvlist.Values, internal.GetValueState(internal.Value(v)))
+ return newMap(&kvlist.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Slice returns the slice value associated with this Value.
@@ -262,18 +261,18 @@ func (v Value) Slice() Slice {
if arr == nil {
return Slice{}
}
- return newSlice(&arr.Values, internal.GetValueState(internal.Value(v)))
+ return newSlice(&arr.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Bytes returns the ByteSlice value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeBytes
// then returns an invalid ByteSlice object. Note that using such slice can cause panic.
func (v Value) Bytes() ByteSlice {
- bv, ok := v.getOrig().GetValue().(*otlpcommon.AnyValue_BytesValue)
+ bv, ok := v.getOrig().GetValue().(*internal.AnyValue_BytesValue)
if !ok {
return ByteSlice{}
}
- return ByteSlice(internal.NewByteSlice(&bv.BytesValue, internal.GetValueState(internal.Value(v))))
+ return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, internal.GetValueState(internal.ValueWrapper(v))))
}
// SetStr replaces the string value associated with this Value,
@@ -284,8 +283,8 @@ func (v Value) Bytes() ByteSlice {
func (v Value) SetStr(sv string) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
- internal.DeleteOrigAnyValue(v.getOrig(), false)
- ov := internal.NewOrigAnyValueStringValue()
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueStringValue()
ov.StringValue = sv
v.getOrig().Value = ov
}
@@ -296,8 +295,8 @@ func (v Value) SetStr(sv string) {
func (v Value) SetInt(iv int64) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
- internal.DeleteOrigAnyValue(v.getOrig(), false)
- ov := internal.NewOrigAnyValueIntValue()
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueIntValue()
ov.IntValue = iv
v.getOrig().Value = ov
}
@@ -308,8 +307,8 @@ func (v Value) SetInt(iv int64) {
func (v Value) SetDouble(dv float64) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
- internal.DeleteOrigAnyValue(v.getOrig(), false)
- ov := internal.NewOrigAnyValueDoubleValue()
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueDoubleValue()
ov.DoubleValue = dv
v.getOrig().Value = ov
}
@@ -320,8 +319,8 @@ func (v Value) SetDouble(dv float64) {
func (v Value) SetBool(bv bool) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
- internal.DeleteOrigAnyValue(v.getOrig(), false)
- ov := internal.NewOrigAnyValueBoolValue()
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueBoolValue()
ov.BoolValue = bv
v.getOrig().Value = ov
}
@@ -331,10 +330,10 @@ func (v Value) SetBool(bv bool) {
func (v Value) SetEmptyBytes() ByteSlice {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
- internal.DeleteOrigAnyValue(v.getOrig(), false)
- bv := internal.NewOrigAnyValueBytesValue()
+ internal.DeleteAnyValue(v.getOrig(), false)
+ bv := internal.NewAnyValueBytesValue()
v.getOrig().Value = bv
- return ByteSlice(internal.NewByteSlice(&bv.BytesValue, v.getState()))
+ return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, v.getState()))
}
// SetEmptyMap sets value to an empty map and returns it.
@@ -342,9 +341,9 @@ func (v Value) SetEmptyBytes() ByteSlice {
func (v Value) SetEmptyMap() Map {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
- internal.DeleteOrigAnyValue(v.getOrig(), false)
- ov := internal.NewOrigAnyValueKvlistValue()
- ov.KvlistValue = internal.NewOrigKeyValueList()
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueKvlistValue()
+ ov.KvlistValue = internal.NewKeyValueList()
v.getOrig().Value = ov
return newMap(&ov.KvlistValue.Values, v.getState())
}
@@ -354,9 +353,9 @@ func (v Value) SetEmptyMap() Map {
func (v Value) SetEmptySlice() Slice {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
- internal.DeleteOrigAnyValue(v.getOrig(), false)
- ov := internal.NewOrigAnyValueArrayValue()
- ov.ArrayValue = internal.NewOrigArrayValue()
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueArrayValue()
+ ov.ArrayValue = internal.NewArrayValue()
v.getOrig().Value = ov
return newSlice(&ov.ArrayValue.Values, v.getState())
}
@@ -379,7 +378,7 @@ func (v Value) MoveTo(dest Value) {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) CopyTo(dest Value) {
dest.getState().AssertMutable()
- internal.CopyOrigAnyValue(dest.getOrig(), v.getOrig())
+ internal.CopyAnyValue(dest.getOrig(), v.getOrig())
}
// AsString converts an OTLP Value object of any type to its equivalent string
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE b/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile b/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile
new file mode 100644
index 000000000..ded7a3609
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/aggregation_temporality.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/aggregation_temporality.go
new file mode 100644
index 000000000..ca4a52d84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/aggregation_temporality.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// AggregationTemporality specifies the method of aggregating metric values,
+// either DELTA (change since last report) or CUMULATIVE (total since a fixed
+// start time).
+type AggregationTemporality int32
+
+const (
+ // AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used.
+ AggregationTemporalityUnspecified = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
+ // AggregationTemporalityDelta is a AggregationTemporality for a metric aggregator which reports changes since last report time.
+ AggregationTemporalityDelta = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
+ // AggregationTemporalityCumulative is a AggregationTemporality for a metric aggregator which reports changes since a fixed start time.
+ AggregationTemporalityCumulative = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
+)
+
+// String returns the string representation of the AggregationTemporality.
+func (at AggregationTemporality) String() string {
+ switch at {
+ case AggregationTemporalityUnspecified:
+ return "Unspecified"
+ case AggregationTemporalityDelta:
+ return "Delta"
+ case AggregationTemporalityCumulative:
+ return "Cumulative"
+ }
+ return ""
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/attributes.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/attributes.go
new file mode 100644
index 000000000..8e70cea8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/attributes.go
@@ -0,0 +1,54 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "errors"
+ "math"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+type attributable interface {
+ AttributeIndices() pcommon.Int32Slice
+}
+
+// FromAttributeIndices builds a [pcommon.Map] containing the attributes of a
+// record.
+// The record can be any struct that implements an `AttributeIndices` method.
+// Updates made to the return map will not be applied back to the record.
+func FromAttributeIndices(table KeyValueAndUnitSlice, record attributable, dic ProfilesDictionary) pcommon.Map {
+ m := pcommon.NewMap()
+ m.EnsureCapacity(record.AttributeIndices().Len())
+
+ for i := 0; i < record.AttributeIndices().Len(); i++ {
+ kv := table.At(int(record.AttributeIndices().At(i)))
+ key := dic.StringTable().At(int(kv.KeyStrindex()))
+ kv.Value().CopyTo(m.PutEmpty(key))
+ }
+
+ return m
+}
+
+var errTooManyAttributeTableEntries = errors.New("too many entries in AttributeTable")
+
+// SetAttribute updates an AttributeTable, adding or providing a value and
+// returns its index.
+func SetAttribute(table KeyValueAndUnitSlice, attr KeyValueAndUnit) (int32, error) {
+ for j, a := range table.All() {
+ if a.Equal(attr) {
+ if j > math.MaxInt32 {
+ return 0, errTooManyAttributeTableEntries
+ }
+ return int32(j), nil //nolint:gosec // G115 overflow checked
+ }
+ }
+
+ if table.Len() >= math.MaxInt32 {
+ return 0, errTooManyAttributeTableEntries
+ }
+
+ attr.CopyTo(table.AppendEmpty())
+ return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go
new file mode 100644
index 000000000..ed37cd6f6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// MarshalSizer is the interface that groups the basic Marshal and Size methods
+type MarshalSizer interface {
+ Marshaler
+ Sizer
+}
+
+// Marshaler marshals pprofile.Profiles into bytes.
+type Marshaler interface {
+ // MarshalProfiles the given pprofile.Profiles into bytes.
+ // If the error is not nil, the returned bytes slice cannot be used.
+ MarshalProfiles(td Profiles) ([]byte, error)
+}
+
+// Unmarshaler unmarshalls bytes into pprofile.Profiles.
+type Unmarshaler interface {
+ // UnmarshalProfiles the given bytes into pprofile.Profiles.
+ // If the error is not nil, the returned pprofile.Profiles cannot be used.
+ UnmarshalProfiles(buf []byte) (Profiles, error)
+}
+
+// Sizer is an optional interface implemented by the Marshaler,
+// that calculates the size of a marshaled Profiles.
+type Sizer interface {
+ // ProfilesSize returns the size in bytes of a marshaled Profiles.
+ ProfilesSize(td Profiles) int
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/function.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/function.go
new file mode 100644
index 000000000..4da4e6053
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/function.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// Equal checks equality with another Function
+func (fn Function) Equal(val Function) bool {
+ return fn.NameStrindex() == val.NameStrindex() &&
+ fn.SystemNameStrindex() == val.SystemNameStrindex() &&
+ fn.FilenameStrindex() == val.FilenameStrindex() &&
+ fn.StartLine() == val.StartLine()
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/functions.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/functions.go
new file mode 100644
index 000000000..b2ce057bd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/functions.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "errors"
+ "math"
+)
+
+var errTooManyFunctionTableEntries = errors.New("too many entries in FunctionTable")
+
+// SetFunction updates a FunctionTable, adding or providing a value and returns
+// its index.
+func SetFunction(table FunctionSlice, fn Function) (int32, error) {
+ for j, m := range table.All() {
+ if m.Equal(fn) {
+ if j > math.MaxInt32 {
+ return 0, errTooManyFunctionTableEntries
+ }
+ return int32(j), nil //nolint:gosec // G115 overflow checked
+ }
+ }
+
+ if table.Len() >= math.MaxInt32 {
+ return 0, errTooManyFunctionTableEntries
+ }
+
+ fn.CopyTo(table.AppendEmpty())
+ return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go
new file mode 100644
index 000000000..a8d52b479
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewFunction function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Function struct {
+ orig *internal.Function
+ state *internal.State
+}
+
+func newFunction(orig *internal.Function, state *internal.State) Function {
+ return Function{orig: orig, state: state}
+}
+
+// NewFunction creates a new empty Function.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewFunction() Function {
+ return newFunction(internal.NewFunction(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Function) MoveTo(dest Function) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteFunction(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// NameStrindex returns the namestrindex associated with this Function.
+func (ms Function) NameStrindex() int32 {
+ return ms.orig.NameStrindex
+}
+
+// SetNameStrindex replaces the namestrindex associated with this Function.
+func (ms Function) SetNameStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.NameStrindex = v
+}
+
+// SystemNameStrindex returns the systemnamestrindex associated with this Function.
+func (ms Function) SystemNameStrindex() int32 {
+ return ms.orig.SystemNameStrindex
+}
+
+// SetSystemNameStrindex replaces the systemnamestrindex associated with this Function.
+func (ms Function) SetSystemNameStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.SystemNameStrindex = v
+}
+
+// FilenameStrindex returns the filenamestrindex associated with this Function.
+func (ms Function) FilenameStrindex() int32 {
+ return ms.orig.FilenameStrindex
+}
+
+// SetFilenameStrindex replaces the filenamestrindex associated with this Function.
+func (ms Function) SetFilenameStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.FilenameStrindex = v
+}
+
+// StartLine returns the startline associated with this Function.
+func (ms Function) StartLine() int64 {
+ return ms.orig.StartLine
+}
+
+// SetStartLine replaces the startline associated with this Function.
+func (ms Function) SetStartLine(v int64) {
+ ms.state.AssertMutable()
+ ms.orig.StartLine = v
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Function) CopyTo(dest Function) {
+ dest.state.AssertMutable()
+ internal.CopyFunction(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go
new file mode 100644
index 000000000..cc8f07377
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// FunctionSlice logically represents a slice of Function.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewFunctionSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type FunctionSlice struct {
+ orig *[]*internal.Function
+ state *internal.State
+}
+
+func newFunctionSlice(orig *[]*internal.Function, state *internal.State) FunctionSlice {
+ return FunctionSlice{orig: orig, state: state}
+}
+
+// NewFunctionSlice creates a FunctionSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewFunctionSlice() FunctionSlice {
+ orig := []*internal.Function(nil)
+ return newFunctionSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewFunctionSlice()".
+func (es FunctionSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es FunctionSlice) At(i int) Function {
+ return newFunction((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es FunctionSlice) All() iter.Seq2[int, Function] {
+ return func(yield func(int, Function) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new FunctionSlice can be initialized:
+//
+// es := NewFunctionSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es FunctionSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Function, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Function.
+// It returns the newly added Function.
+func (es FunctionSlice) AppendEmpty() Function {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewFunction())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es FunctionSlice) MoveAndAppendTo(dest FunctionSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es FunctionSlice) RemoveIf(f func(Function) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteFunction((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es FunctionSlice) CopyTo(dest FunctionSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyFunctionPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Function elements within FunctionSlice given the
+// provided less function so that two instances of FunctionSlice
+// can be compared.
+func (es FunctionSlice) Sort(less func(a, b Function) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunit.go
new file mode 100644
index 000000000..15eac86a6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunit.go
@@ -0,0 +1,84 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// KeyValueAndUnit represents a custom 'dictionary native'
+// style of encoding attributes which is more convenient
+// for profiles than opentelemetry.proto.common.v1.KeyValue.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewKeyValueAndUnit function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type KeyValueAndUnit struct {
+ orig *internal.KeyValueAndUnit
+ state *internal.State
+}
+
+func newKeyValueAndUnit(orig *internal.KeyValueAndUnit, state *internal.State) KeyValueAndUnit {
+ return KeyValueAndUnit{orig: orig, state: state}
+}
+
+// NewKeyValueAndUnit creates a new empty KeyValueAndUnit.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewKeyValueAndUnit() KeyValueAndUnit {
+ return newKeyValueAndUnit(internal.NewKeyValueAndUnit(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms KeyValueAndUnit) MoveTo(dest KeyValueAndUnit) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteKeyValueAndUnit(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// KeyStrindex returns the keystrindex associated with this KeyValueAndUnit.
+func (ms KeyValueAndUnit) KeyStrindex() int32 {
+ return ms.orig.KeyStrindex
+}
+
+// SetKeyStrindex replaces the keystrindex associated with this KeyValueAndUnit.
+func (ms KeyValueAndUnit) SetKeyStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.KeyStrindex = v
+}
+
+// Value returns the value associated with this KeyValueAndUnit.
+func (ms KeyValueAndUnit) Value() pcommon.Value {
+ return pcommon.Value(internal.NewValueWrapper(&ms.orig.Value, ms.state))
+}
+
+// UnitStrindex returns the unitstrindex associated with this KeyValueAndUnit.
+func (ms KeyValueAndUnit) UnitStrindex() int32 {
+ return ms.orig.UnitStrindex
+}
+
+// SetUnitStrindex replaces the unitstrindex associated with this KeyValueAndUnit.
+func (ms KeyValueAndUnit) SetUnitStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.UnitStrindex = v
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms KeyValueAndUnit) CopyTo(dest KeyValueAndUnit) {
+ dest.state.AssertMutable()
+ internal.CopyKeyValueAndUnit(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunitslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunitslice.go
new file mode 100644
index 000000000..a068a3d31
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_keyvalueandunitslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// KeyValueAndUnitSlice logically represents a slice of KeyValueAndUnit.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewKeyValueAndUnitSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type KeyValueAndUnitSlice struct {
+ orig *[]*internal.KeyValueAndUnit
+ state *internal.State
+}
+
+func newKeyValueAndUnitSlice(orig *[]*internal.KeyValueAndUnit, state *internal.State) KeyValueAndUnitSlice {
+ return KeyValueAndUnitSlice{orig: orig, state: state}
+}
+
+// NewKeyValueAndUnitSlice creates a KeyValueAndUnitSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewKeyValueAndUnitSlice() KeyValueAndUnitSlice {
+ orig := []*internal.KeyValueAndUnit(nil)
+ return newKeyValueAndUnitSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewKeyValueAndUnitSlice()".
+func (es KeyValueAndUnitSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es KeyValueAndUnitSlice) At(i int) KeyValueAndUnit {
+ return newKeyValueAndUnit((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es KeyValueAndUnitSlice) All() iter.Seq2[int, KeyValueAndUnit] {
+ return func(yield func(int, KeyValueAndUnit) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new KeyValueAndUnitSlice can be initialized:
+//
+// es := NewKeyValueAndUnitSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es KeyValueAndUnitSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.KeyValueAndUnit, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty KeyValueAndUnit.
+// It returns the newly added KeyValueAndUnit.
+func (es KeyValueAndUnitSlice) AppendEmpty() KeyValueAndUnit {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewKeyValueAndUnit())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es KeyValueAndUnitSlice) MoveAndAppendTo(dest KeyValueAndUnitSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es KeyValueAndUnitSlice) RemoveIf(f func(KeyValueAndUnit) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteKeyValueAndUnit((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es KeyValueAndUnitSlice) CopyTo(dest KeyValueAndUnitSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyKeyValueAndUnitPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the KeyValueAndUnit elements within KeyValueAndUnitSlice given the
+// provided less function so that two instances of KeyValueAndUnitSlice
+// can be compared.
+func (es KeyValueAndUnitSlice) Sort(less func(a, b KeyValueAndUnit) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go
new file mode 100644
index 000000000..724ce01ec
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go
@@ -0,0 +1,87 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// Line details a specific line in a source code, linked to a function.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewLine function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Line struct {
+ orig *internal.Line
+ state *internal.State
+}
+
+func newLine(orig *internal.Line, state *internal.State) Line {
+ return Line{orig: orig, state: state}
+}
+
+// NewLine creates a new empty Line.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewLine() Line {
+ return newLine(internal.NewLine(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Line) MoveTo(dest Line) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteLine(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// FunctionIndex returns the functionindex associated with this Line.
+func (ms Line) FunctionIndex() int32 {
+ return ms.orig.FunctionIndex
+}
+
+// SetFunctionIndex replaces the functionindex associated with this Line.
+func (ms Line) SetFunctionIndex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.FunctionIndex = v
+}
+
+// Line returns the line associated with this Line.
+func (ms Line) Line() int64 {
+ return ms.orig.Line
+}
+
+// SetLine replaces the line associated with this Line.
+func (ms Line) SetLine(v int64) {
+ ms.state.AssertMutable()
+ ms.orig.Line = v
+}
+
+// Column returns the column associated with this Line.
+func (ms Line) Column() int64 {
+ return ms.orig.Column
+}
+
+// SetColumn replaces the column associated with this Line.
+func (ms Line) SetColumn(v int64) {
+ ms.state.AssertMutable()
+ ms.orig.Column = v
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Line) CopyTo(dest Line) {
+ dest.state.AssertMutable()
+ internal.CopyLine(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go
new file mode 100644
index 000000000..77bc632be
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// LineSlice logically represents a slice of Line.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewLineSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type LineSlice struct {
+ orig *[]*internal.Line
+ state *internal.State
+}
+
+func newLineSlice(orig *[]*internal.Line, state *internal.State) LineSlice {
+ return LineSlice{orig: orig, state: state}
+}
+
+// NewLineSlice creates a LineSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewLineSlice() LineSlice {
+ orig := []*internal.Line(nil)
+ return newLineSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewLineSlice()".
+func (es LineSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es LineSlice) At(i int) Line {
+ return newLine((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es LineSlice) All() iter.Seq2[int, Line] {
+ return func(yield func(int, Line) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new LineSlice can be initialized:
+//
+// es := NewLineSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es LineSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Line, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Line.
+// It returns the newly added Line.
+func (es LineSlice) AppendEmpty() Line {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewLine())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es LineSlice) MoveAndAppendTo(dest LineSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es LineSlice) RemoveIf(f func(Line) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteLine((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es LineSlice) CopyTo(dest LineSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyLinePtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Line elements within LineSlice given the
+// provided less function so that two instances of LineSlice
+// can be compared.
+func (es LineSlice) Sort(less func(a, b Line) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go
new file mode 100644
index 000000000..70f401eda
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// Link represents a pointer from a profile Sample to a trace Span.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewLink function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Link struct {
+ orig *internal.Link
+ state *internal.State
+}
+
+func newLink(orig *internal.Link, state *internal.State) Link {
+ return Link{orig: orig, state: state}
+}
+
+// NewLink creates a new empty Link.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewLink() Link {
+ return newLink(internal.NewLink(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Link) MoveTo(dest Link) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteLink(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// TraceID returns the traceid associated with this Link.
+func (ms Link) TraceID() pcommon.TraceID {
+ return pcommon.TraceID(ms.orig.TraceId)
+}
+
+// SetTraceID replaces the traceid associated with this Link.
+func (ms Link) SetTraceID(v pcommon.TraceID) {
+ ms.state.AssertMutable()
+ ms.orig.TraceId = internal.TraceID(v)
+}
+
+// SpanID returns the spanid associated with this Link.
+func (ms Link) SpanID() pcommon.SpanID {
+ return pcommon.SpanID(ms.orig.SpanId)
+}
+
+// SetSpanID replaces the spanid associated with this Link.
+func (ms Link) SetSpanID(v pcommon.SpanID) {
+ ms.state.AssertMutable()
+ ms.orig.SpanId = internal.SpanID(v)
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Link) CopyTo(dest Link) {
+ dest.state.AssertMutable()
+ internal.CopyLink(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go
new file mode 100644
index 000000000..7ebce9654
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// LinkSlice logically represents a slice of Link.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewLinkSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type LinkSlice struct {
+ orig *[]*internal.Link
+ state *internal.State
+}
+
+func newLinkSlice(orig *[]*internal.Link, state *internal.State) LinkSlice {
+ return LinkSlice{orig: orig, state: state}
+}
+
+// NewLinkSlice creates a LinkSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewLinkSlice() LinkSlice {
+ orig := []*internal.Link(nil)
+ return newLinkSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewLinkSlice()".
+func (es LinkSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es LinkSlice) At(i int) Link {
+ return newLink((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es LinkSlice) All() iter.Seq2[int, Link] {
+ return func(yield func(int, Link) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new LinkSlice can be initialized:
+//
+// es := NewLinkSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es LinkSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Link, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Link.
+// It returns the newly added Link.
+func (es LinkSlice) AppendEmpty() Link {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewLink())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es LinkSlice) MoveAndAppendTo(dest LinkSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es LinkSlice) RemoveIf(f func(Link) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteLink((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es LinkSlice) CopyTo(dest LinkSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyLinkPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Link elements within LinkSlice given the
+// provided less function so that two instances of LinkSlice
+// can be compared.
+func (es LinkSlice) Sort(less func(a, b Link) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go
new file mode 100644
index 000000000..567959326
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go
@@ -0,0 +1,87 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// Location describes function and line table debug information.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewLocation function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Location struct {
+ orig *internal.Location
+ state *internal.State
+}
+
+func newLocation(orig *internal.Location, state *internal.State) Location {
+ return Location{orig: orig, state: state}
+}
+
+// NewLocation creates a new empty Location.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewLocation() Location {
+ return newLocation(internal.NewLocation(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Location) MoveTo(dest Location) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteLocation(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// MappingIndex returns the mappingindex associated with this Location.
+func (ms Location) MappingIndex() int32 {
+ return ms.orig.MappingIndex
+}
+
+// SetMappingIndex replaces the mappingindex associated with this Location.
+func (ms Location) SetMappingIndex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.MappingIndex = v
+}
+
+// Address returns the address associated with this Location.
+func (ms Location) Address() uint64 {
+ return ms.orig.Address
+}
+
+// SetAddress replaces the address associated with this Location.
+func (ms Location) SetAddress(v uint64) {
+ ms.state.AssertMutable()
+ ms.orig.Address = v
+}
+
+// Lines returns the Lines associated with this Location.
+func (ms Location) Lines() LineSlice {
+ return newLineSlice(&ms.orig.Lines, ms.state)
+}
+
+// AttributeIndices returns the AttributeIndices associated with this Location.
+func (ms Location) AttributeIndices() pcommon.Int32Slice {
+ return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Location) CopyTo(dest Location) {
+ dest.state.AssertMutable()
+ internal.CopyLocation(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go
new file mode 100644
index 000000000..9d68e247f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// LocationSlice logically represents a slice of Location.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewLocationSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type LocationSlice struct {
+ orig *[]*internal.Location
+ state *internal.State
+}
+
+func newLocationSlice(orig *[]*internal.Location, state *internal.State) LocationSlice {
+ return LocationSlice{orig: orig, state: state}
+}
+
+// NewLocationSlice creates a LocationSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewLocationSlice() LocationSlice {
+ orig := []*internal.Location(nil)
+ return newLocationSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewLocationSlice()".
+func (es LocationSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es LocationSlice) At(i int) Location {
+ return newLocation((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es LocationSlice) All() iter.Seq2[int, Location] {
+ return func(yield func(int, Location) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new LocationSlice can be initialized:
+//
+// es := NewLocationSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es LocationSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Location, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Location.
+// It returns the newly added Location.
+func (es LocationSlice) AppendEmpty() Location {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewLocation())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es LocationSlice) MoveAndAppendTo(dest LocationSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es LocationSlice) RemoveIf(f func(Location) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteLocation((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es LocationSlice) CopyTo(dest LocationSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyLocationPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Location elements within LocationSlice given the
+// provided less function so that two instances of LocationSlice
+// can be compared.
+func (es LocationSlice) Sort(less func(a, b Location) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go
new file mode 100644
index 000000000..8294acc58
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go
@@ -0,0 +1,104 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewMapping function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Mapping struct {
+ orig *internal.Mapping
+ state *internal.State
+}
+
+func newMapping(orig *internal.Mapping, state *internal.State) Mapping {
+ return Mapping{orig: orig, state: state}
+}
+
+// NewMapping creates a new empty Mapping.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewMapping() Mapping {
+ return newMapping(internal.NewMapping(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Mapping) MoveTo(dest Mapping) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteMapping(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// MemoryStart returns the memorystart associated with this Mapping.
+func (ms Mapping) MemoryStart() uint64 {
+ return ms.orig.MemoryStart
+}
+
+// SetMemoryStart replaces the memorystart associated with this Mapping.
+func (ms Mapping) SetMemoryStart(v uint64) {
+ ms.state.AssertMutable()
+ ms.orig.MemoryStart = v
+}
+
+// MemoryLimit returns the memorylimit associated with this Mapping.
+func (ms Mapping) MemoryLimit() uint64 {
+ return ms.orig.MemoryLimit
+}
+
+// SetMemoryLimit replaces the memorylimit associated with this Mapping.
+func (ms Mapping) SetMemoryLimit(v uint64) {
+ ms.state.AssertMutable()
+ ms.orig.MemoryLimit = v
+}
+
+// FileOffset returns the fileoffset associated with this Mapping.
+func (ms Mapping) FileOffset() uint64 {
+ return ms.orig.FileOffset
+}
+
+// SetFileOffset replaces the fileoffset associated with this Mapping.
+func (ms Mapping) SetFileOffset(v uint64) {
+ ms.state.AssertMutable()
+ ms.orig.FileOffset = v
+}
+
+// FilenameStrindex returns the filenamestrindex associated with this Mapping.
+func (ms Mapping) FilenameStrindex() int32 {
+ return ms.orig.FilenameStrindex
+}
+
+// SetFilenameStrindex replaces the filenamestrindex associated with this Mapping.
+func (ms Mapping) SetFilenameStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.FilenameStrindex = v
+}
+
+// AttributeIndices returns the AttributeIndices associated with this Mapping.
+func (ms Mapping) AttributeIndices() pcommon.Int32Slice {
+ return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Mapping) CopyTo(dest Mapping) {
+ dest.state.AssertMutable()
+ internal.CopyMapping(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go
new file mode 100644
index 000000000..c4ed87372
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// MappingSlice logically represents a slice of Mapping.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewMappingSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type MappingSlice struct {
+ orig *[]*internal.Mapping
+ state *internal.State
+}
+
+func newMappingSlice(orig *[]*internal.Mapping, state *internal.State) MappingSlice {
+ return MappingSlice{orig: orig, state: state}
+}
+
+// NewMappingSlice creates a MappingSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewMappingSlice() MappingSlice {
+ orig := []*internal.Mapping(nil)
+ return newMappingSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewMappingSlice()".
+func (es MappingSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es MappingSlice) At(i int) Mapping {
+ return newMapping((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es MappingSlice) All() iter.Seq2[int, Mapping] {
+ return func(yield func(int, Mapping) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new MappingSlice can be initialized:
+//
+// es := NewMappingSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es MappingSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Mapping, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Mapping.
+// It returns the newly added Mapping.
+func (es MappingSlice) AppendEmpty() Mapping {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewMapping())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es MappingSlice) MoveAndAppendTo(dest MappingSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es MappingSlice) RemoveIf(f func(Mapping) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteMapping((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es MappingSlice) CopyTo(dest MappingSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyMappingPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Mapping elements within MappingSlice given the
+// provided less function so that two instances of MappingSlice
+// can be compared.
+func (es MappingSlice) Sort(less func(a, b Mapping) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go
new file mode 100644
index 000000000..786002381
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// Profile are an implementation of the pprofextended data model.
+
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewProfile function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Profile struct {
+ orig *internal.Profile
+ state *internal.State
+}
+
+func newProfile(orig *internal.Profile, state *internal.State) Profile {
+ return Profile{orig: orig, state: state}
+}
+
+// NewProfile creates a new empty Profile.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewProfile() Profile {
+ return newProfile(internal.NewProfile(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Profile) MoveTo(dest Profile) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteProfile(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// SampleType returns the sampletype associated with this Profile.
+func (ms Profile) SampleType() ValueType {
+ return newValueType(&ms.orig.SampleType, ms.state)
+}
+
+// Samples returns the Samples associated with this Profile.
+func (ms Profile) Samples() SampleSlice {
+ return newSampleSlice(&ms.orig.Samples, ms.state)
+}
+
+// Time returns the time associated with this Profile.
+func (ms Profile) Time() pcommon.Timestamp {
+ return pcommon.Timestamp(ms.orig.TimeUnixNano)
+}
+
+// SetTime replaces the time associated with this Profile.
+func (ms Profile) SetTime(v pcommon.Timestamp) {
+ ms.state.AssertMutable()
+ ms.orig.TimeUnixNano = uint64(v)
+}
+
+// Duration returns the duration associated with this Profile.
+func (ms Profile) Duration() pcommon.Timestamp {
+ return pcommon.Timestamp(ms.orig.DurationNano)
+}
+
+// SetDuration replaces the duration associated with this Profile.
+func (ms Profile) SetDuration(v pcommon.Timestamp) {
+ ms.state.AssertMutable()
+ ms.orig.DurationNano = uint64(v)
+}
+
+// PeriodType returns the periodtype associated with this Profile.
+func (ms Profile) PeriodType() ValueType {
+ return newValueType(&ms.orig.PeriodType, ms.state)
+}
+
+// Period returns the period associated with this Profile.
+func (ms Profile) Period() int64 {
+ return ms.orig.Period
+}
+
+// SetPeriod replaces the period associated with this Profile.
+func (ms Profile) SetPeriod(v int64) {
+ ms.state.AssertMutable()
+ ms.orig.Period = v
+}
+
+// ProfileID returns the profileid associated with this Profile.
+func (ms Profile) ProfileID() ProfileID {
+ return ProfileID(ms.orig.ProfileId)
+}
+
+// SetProfileID replaces the profileid associated with this Profile.
+func (ms Profile) SetProfileID(v ProfileID) {
+ ms.state.AssertMutable()
+ ms.orig.ProfileId = internal.ProfileID(v)
+}
+
+// DroppedAttributesCount returns the droppedattributescount associated with this Profile.
+func (ms Profile) DroppedAttributesCount() uint32 {
+ return ms.orig.DroppedAttributesCount
+}
+
+// SetDroppedAttributesCount replaces the droppedattributescount associated with this Profile.
+func (ms Profile) SetDroppedAttributesCount(v uint32) {
+ ms.state.AssertMutable()
+ ms.orig.DroppedAttributesCount = v
+}
+
+// OriginalPayloadFormat returns the originalpayloadformat associated with this Profile.
+func (ms Profile) OriginalPayloadFormat() string {
+ return ms.orig.OriginalPayloadFormat
+}
+
+// SetOriginalPayloadFormat replaces the originalpayloadformat associated with this Profile.
+func (ms Profile) SetOriginalPayloadFormat(v string) {
+ ms.state.AssertMutable()
+ ms.orig.OriginalPayloadFormat = v
+}
+
+// OriginalPayload returns the OriginalPayload associated with this Profile.
+func (ms Profile) OriginalPayload() pcommon.ByteSlice {
+ return pcommon.ByteSlice(internal.NewByteSliceWrapper(&ms.orig.OriginalPayload, ms.state))
+}
+
+// AttributeIndices returns the AttributeIndices associated with this Profile.
+func (ms Profile) AttributeIndices() pcommon.Int32Slice {
+ return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Profile) CopyTo(dest Profile) {
+ dest.state.AssertMutable()
+ internal.CopyProfile(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profiles.go
new file mode 100644
index 000000000..2dc855542
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profiles.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// Profiles is the top-level struct that is propagated through the profiles pipeline.
+// Use NewProfiles to create new instance, zero-initialized instance is not valid for use.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewProfiles function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Profiles internal.ProfilesWrapper
+
+func newProfiles(orig *internal.ExportProfilesServiceRequest, state *internal.State) Profiles {
+ return Profiles(internal.NewProfilesWrapper(orig, state))
+}
+
+// NewProfiles creates a new empty Profiles.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewProfiles() Profiles {
+ return newProfiles(internal.NewExportProfilesServiceRequest(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Profiles) MoveTo(dest Profiles) {
+ ms.getState().AssertMutable()
+ dest.getState().AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ internal.DeleteExportProfilesServiceRequest(dest.getOrig(), false)
+ *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
+}
+
+// ResourceProfiles returns the ResourceProfiles associated with this Profiles.
+func (ms Profiles) ResourceProfiles() ResourceProfilesSlice {
+ return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, ms.getState())
+}
+
+// Dictionary returns the dictionary associated with this Profiles.
+func (ms Profiles) Dictionary() ProfilesDictionary {
+ return newProfilesDictionary(&ms.getOrig().Dictionary, ms.getState())
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Profiles) CopyTo(dest Profiles) {
+ dest.getState().AssertMutable()
+ internal.CopyExportProfilesServiceRequest(dest.getOrig(), ms.getOrig())
+}
+
+func (ms Profiles) getOrig() *internal.ExportProfilesServiceRequest {
+ return internal.GetProfilesOrig(internal.ProfilesWrapper(ms))
+}
+
+func (ms Profiles) getState() *internal.State {
+ return internal.GetProfilesState(internal.ProfilesWrapper(ms))
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdata.go
new file mode 100644
index 000000000..dde7a05b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdata.go
@@ -0,0 +1,71 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ProfilesData represents the profiles data that can be stored in persistent storage,
+// OR can be embedded by other protocols that transfer OTLP profiles data but do not
+// implement the OTLP protocol.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewProfilesData function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ProfilesData internal.ProfilesDataWrapper
+
+func newProfilesData(orig *internal.ProfilesData, state *internal.State) ProfilesData {
+ return ProfilesData(internal.NewProfilesDataWrapper(orig, state))
+}
+
+// NewProfilesData creates a new empty ProfilesData.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewProfilesData() ProfilesData {
+ return newProfilesData(internal.NewProfilesData(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms ProfilesData) MoveTo(dest ProfilesData) {
+ ms.getState().AssertMutable()
+ dest.getState().AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ internal.DeleteProfilesData(dest.getOrig(), false)
+ *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
+}
+
+// ResourceProfiles returns the ResourceProfiles associated with this ProfilesData.
+func (ms ProfilesData) ResourceProfiles() ResourceProfilesSlice {
+ return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, ms.getState())
+}
+
+// Dictionary returns the dictionary associated with this ProfilesData.
+func (ms ProfilesData) Dictionary() ProfilesDictionary {
+ return newProfilesDictionary(&ms.getOrig().Dictionary, ms.getState())
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms ProfilesData) CopyTo(dest ProfilesData) {
+ dest.getState().AssertMutable()
+ internal.CopyProfilesData(dest.getOrig(), ms.getOrig())
+}
+
+func (ms ProfilesData) getOrig() *internal.ProfilesData {
+ return internal.GetProfilesDataOrig(internal.ProfilesDataWrapper(ms))
+}
+
+func (ms ProfilesData) getState() *internal.State {
+ return internal.GetProfilesDataState(internal.ProfilesDataWrapper(ms))
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdictionary.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdictionary.go
new file mode 100644
index 000000000..673da6d84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesdictionary.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// ProfilesDictionary is the reference table containing all data shared by profiles across the message being sent.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewProfilesDictionary function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ProfilesDictionary struct {
+ orig *internal.ProfilesDictionary
+ state *internal.State
+}
+
+func newProfilesDictionary(orig *internal.ProfilesDictionary, state *internal.State) ProfilesDictionary {
+ return ProfilesDictionary{orig: orig, state: state}
+}
+
+// NewProfilesDictionary creates a new empty ProfilesDictionary.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewProfilesDictionary() ProfilesDictionary {
+ return newProfilesDictionary(internal.NewProfilesDictionary(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms ProfilesDictionary) MoveTo(dest ProfilesDictionary) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteProfilesDictionary(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// MappingTable returns the MappingTable associated with this ProfilesDictionary.
+func (ms ProfilesDictionary) MappingTable() MappingSlice {
+ return newMappingSlice(&ms.orig.MappingTable, ms.state)
+}
+
+// LocationTable returns the LocationTable associated with this ProfilesDictionary.
+func (ms ProfilesDictionary) LocationTable() LocationSlice {
+ return newLocationSlice(&ms.orig.LocationTable, ms.state)
+}
+
+// FunctionTable returns the FunctionTable associated with this ProfilesDictionary.
+func (ms ProfilesDictionary) FunctionTable() FunctionSlice {
+ return newFunctionSlice(&ms.orig.FunctionTable, ms.state)
+}
+
+// LinkTable returns the LinkTable associated with this ProfilesDictionary.
+func (ms ProfilesDictionary) LinkTable() LinkSlice {
+ return newLinkSlice(&ms.orig.LinkTable, ms.state)
+}
+
+// StringTable returns the StringTable associated with this ProfilesDictionary.
+func (ms ProfilesDictionary) StringTable() pcommon.StringSlice {
+ return pcommon.StringSlice(internal.NewStringSliceWrapper(&ms.orig.StringTable, ms.state))
+}
+
+// AttributeTable returns the AttributeTable associated with this ProfilesDictionary.
+func (ms ProfilesDictionary) AttributeTable() KeyValueAndUnitSlice {
+ return newKeyValueAndUnitSlice(&ms.orig.AttributeTable, ms.state)
+}
+
+// StackTable returns the StackTable associated with this ProfilesDictionary.
+func (ms ProfilesDictionary) StackTable() StackSlice {
+ return newStackSlice(&ms.orig.StackTable, ms.state)
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms ProfilesDictionary) CopyTo(dest ProfilesDictionary) {
+ dest.state.AssertMutable()
+ internal.CopyProfilesDictionary(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go
new file mode 100644
index 000000000..98c7e2fc2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ProfilesSlice logically represents a slice of Profile.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewProfilesSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ProfilesSlice struct {
+ orig *[]*internal.Profile
+ state *internal.State
+}
+
+func newProfilesSlice(orig *[]*internal.Profile, state *internal.State) ProfilesSlice {
+ return ProfilesSlice{orig: orig, state: state}
+}
+
+// NewProfilesSlice creates a ProfilesSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewProfilesSlice() ProfilesSlice {
+ orig := []*internal.Profile(nil)
+ return newProfilesSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewProfilesSlice()".
+func (es ProfilesSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es ProfilesSlice) At(i int) Profile {
+ return newProfile((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ProfilesSlice) All() iter.Seq2[int, Profile] {
+ return func(yield func(int, Profile) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new ProfilesSlice can be initialized:
+//
+// es := NewProfilesSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es ProfilesSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Profile, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Profile.
+// It returns the newly added Profile.
+func (es ProfilesSlice) AppendEmpty() Profile {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewProfile())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es ProfilesSlice) MoveAndAppendTo(dest ProfilesSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es ProfilesSlice) RemoveIf(f func(Profile) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteProfile((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es ProfilesSlice) CopyTo(dest ProfilesSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyProfilePtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Profile elements within ProfilesSlice given the
+// provided less function so that two instances of ProfilesSlice
+// can be compared.
+func (es ProfilesSlice) Sort(less func(a, b Profile) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go
new file mode 100644
index 000000000..cee70803f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// ResourceProfiles is a collection of profiles from a Resource.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewResourceProfiles function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ResourceProfiles struct {
+ orig *internal.ResourceProfiles
+ state *internal.State
+}
+
+func newResourceProfiles(orig *internal.ResourceProfiles, state *internal.State) ResourceProfiles {
+ return ResourceProfiles{orig: orig, state: state}
+}
+
+// NewResourceProfiles creates a new empty ResourceProfiles.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewResourceProfiles() ResourceProfiles {
+ return newResourceProfiles(internal.NewResourceProfiles(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms ResourceProfiles) MoveTo(dest ResourceProfiles) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteResourceProfiles(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// Resource returns the resource associated with this ResourceProfiles.
+func (ms ResourceProfiles) Resource() pcommon.Resource {
+ return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
+}
+
+// ScopeProfiles returns the ScopeProfiles associated with this ResourceProfiles.
+func (ms ResourceProfiles) ScopeProfiles() ScopeProfilesSlice {
+ return newScopeProfilesSlice(&ms.orig.ScopeProfiles, ms.state)
+}
+
+// SchemaUrl returns the schemaurl associated with this ResourceProfiles.
+func (ms ResourceProfiles) SchemaUrl() string {
+ return ms.orig.SchemaUrl
+}
+
+// SetSchemaUrl replaces the schemaurl associated with this ResourceProfiles.
+func (ms ResourceProfiles) SetSchemaUrl(v string) {
+ ms.state.AssertMutable()
+ ms.orig.SchemaUrl = v
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms ResourceProfiles) CopyTo(dest ResourceProfiles) {
+ dest.state.AssertMutable()
+ internal.CopyResourceProfiles(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go
new file mode 100644
index 000000000..e1f4748cf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ResourceProfilesSlice logically represents a slice of ResourceProfiles.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewResourceProfilesSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ResourceProfilesSlice struct {
+ orig *[]*internal.ResourceProfiles
+ state *internal.State
+}
+
+func newResourceProfilesSlice(orig *[]*internal.ResourceProfiles, state *internal.State) ResourceProfilesSlice {
+ return ResourceProfilesSlice{orig: orig, state: state}
+}
+
+// NewResourceProfilesSlice creates a ResourceProfilesSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewResourceProfilesSlice() ResourceProfilesSlice {
+ orig := []*internal.ResourceProfiles(nil)
+ return newResourceProfilesSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewResourceProfilesSlice()".
+func (es ResourceProfilesSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es ResourceProfilesSlice) At(i int) ResourceProfiles {
+ return newResourceProfiles((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ResourceProfilesSlice) All() iter.Seq2[int, ResourceProfiles] {
+ return func(yield func(int, ResourceProfiles) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new ResourceProfilesSlice can be initialized:
+//
+// es := NewResourceProfilesSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es ResourceProfilesSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.ResourceProfiles, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty ResourceProfiles.
+// It returns the newly added ResourceProfiles.
+func (es ResourceProfilesSlice) AppendEmpty() ResourceProfiles {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewResourceProfiles())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es ResourceProfilesSlice) MoveAndAppendTo(dest ResourceProfilesSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es ResourceProfilesSlice) RemoveIf(f func(ResourceProfiles) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteResourceProfiles((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es ResourceProfilesSlice) CopyTo(dest ResourceProfilesSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyResourceProfilesPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the ResourceProfiles elements within ResourceProfilesSlice given the
+// provided less function so that two instances of ResourceProfilesSlice
+// can be compared.
+func (es ResourceProfilesSlice) Sort(less func(a, b ResourceProfiles) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go
new file mode 100644
index 000000000..c50d09d7f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// Sample represents each record value encountered within a profiled program.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewSample function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Sample struct {
+ orig *internal.Sample
+ state *internal.State
+}
+
+func newSample(orig *internal.Sample, state *internal.State) Sample {
+ return Sample{orig: orig, state: state}
+}
+
+// NewSample creates a new empty Sample.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewSample() Sample {
+ return newSample(internal.NewSample(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Sample) MoveTo(dest Sample) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteSample(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// StackIndex returns the stackindex associated with this Sample.
+func (ms Sample) StackIndex() int32 {
+ return ms.orig.StackIndex
+}
+
+// SetStackIndex replaces the stackindex associated with this Sample.
+func (ms Sample) SetStackIndex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.StackIndex = v
+}
+
+// Values returns the Values associated with this Sample.
+func (ms Sample) Values() pcommon.Int64Slice {
+ return pcommon.Int64Slice(internal.NewInt64SliceWrapper(&ms.orig.Values, ms.state))
+}
+
+// AttributeIndices returns the AttributeIndices associated with this Sample.
+func (ms Sample) AttributeIndices() pcommon.Int32Slice {
+ return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
+}
+
+// LinkIndex returns the linkindex associated with this Sample.
+func (ms Sample) LinkIndex() int32 {
+ return ms.orig.LinkIndex
+}
+
+// SetLinkIndex replaces the linkindex associated with this Sample.
+func (ms Sample) SetLinkIndex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.LinkIndex = v
+}
+
+// TimestampsUnixNano returns the TimestampsUnixNano associated with this Sample.
+func (ms Sample) TimestampsUnixNano() pcommon.UInt64Slice {
+ return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.TimestampsUnixNano, ms.state))
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Sample) CopyTo(dest Sample) {
+ dest.state.AssertMutable()
+ internal.CopySample(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go
new file mode 100644
index 000000000..a0ce98721
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// SampleSlice logically represents a slice of Sample.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewSampleSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type SampleSlice struct {
+ orig *[]*internal.Sample
+ state *internal.State
+}
+
+func newSampleSlice(orig *[]*internal.Sample, state *internal.State) SampleSlice {
+ return SampleSlice{orig: orig, state: state}
+}
+
+// NewSampleSlice creates a SampleSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewSampleSlice() SampleSlice {
+ orig := []*internal.Sample(nil)
+ return newSampleSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewSampleSlice()".
+func (es SampleSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es SampleSlice) At(i int) Sample {
+ return newSample((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es SampleSlice) All() iter.Seq2[int, Sample] {
+ return func(yield func(int, Sample) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new SampleSlice can be initialized:
+//
+// es := NewSampleSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es SampleSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Sample, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Sample.
+// It returns the newly added Sample.
+func (es SampleSlice) AppendEmpty() Sample {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewSample())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es SampleSlice) MoveAndAppendTo(dest SampleSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es SampleSlice) RemoveIf(f func(Sample) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteSample((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es SampleSlice) CopyTo(dest SampleSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopySamplePtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Sample elements within SampleSlice given the
+// provided less function so that two instances of SampleSlice
+// can be compared.
+func (es SampleSlice) Sort(less func(a, b Sample) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go
new file mode 100644
index 000000000..01456cecc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// ScopeProfiles is a collection of profiles from a LibraryInstrumentation.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewScopeProfiles function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ScopeProfiles struct {
+ orig *internal.ScopeProfiles
+ state *internal.State
+}
+
+func newScopeProfiles(orig *internal.ScopeProfiles, state *internal.State) ScopeProfiles {
+ return ScopeProfiles{orig: orig, state: state}
+}
+
+// NewScopeProfiles creates a new empty ScopeProfiles.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewScopeProfiles() ScopeProfiles {
+ return newScopeProfiles(internal.NewScopeProfiles(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms ScopeProfiles) MoveTo(dest ScopeProfiles) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteScopeProfiles(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// Scope returns the scope associated with this ScopeProfiles.
+func (ms ScopeProfiles) Scope() pcommon.InstrumentationScope {
+ return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
+}
+
+// Profiles returns the Profiles associated with this ScopeProfiles.
+func (ms ScopeProfiles) Profiles() ProfilesSlice {
+ return newProfilesSlice(&ms.orig.Profiles, ms.state)
+}
+
+// SchemaUrl returns the schemaurl associated with this ScopeProfiles.
+func (ms ScopeProfiles) SchemaUrl() string {
+ return ms.orig.SchemaUrl
+}
+
+// SetSchemaUrl replaces the schemaurl associated with this ScopeProfiles.
+func (ms ScopeProfiles) SetSchemaUrl(v string) {
+ ms.state.AssertMutable()
+ ms.orig.SchemaUrl = v
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms ScopeProfiles) CopyTo(dest ScopeProfiles) {
+ dest.state.AssertMutable()
+ internal.CopyScopeProfiles(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go
new file mode 100644
index 000000000..5c438763a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ScopeProfilesSlice logically represents a slice of ScopeProfiles.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewScopeProfilesSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ScopeProfilesSlice struct {
+ orig *[]*internal.ScopeProfiles
+ state *internal.State
+}
+
+func newScopeProfilesSlice(orig *[]*internal.ScopeProfiles, state *internal.State) ScopeProfilesSlice {
+ return ScopeProfilesSlice{orig: orig, state: state}
+}
+
+// NewScopeProfilesSlice creates a ScopeProfilesSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewScopeProfilesSlice() ScopeProfilesSlice {
+ orig := []*internal.ScopeProfiles(nil)
+ return newScopeProfilesSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewScopeProfilesSlice()".
+func (es ScopeProfilesSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es ScopeProfilesSlice) At(i int) ScopeProfiles {
+ return newScopeProfiles((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ScopeProfilesSlice) All() iter.Seq2[int, ScopeProfiles] {
+ return func(yield func(int, ScopeProfiles) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new ScopeProfilesSlice can be initialized:
+//
+// es := NewScopeProfilesSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es ScopeProfilesSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.ScopeProfiles, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty ScopeProfiles.
+// It returns the newly added ScopeProfiles.
+func (es ScopeProfilesSlice) AppendEmpty() ScopeProfiles {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewScopeProfiles())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es ScopeProfilesSlice) MoveAndAppendTo(dest ScopeProfilesSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es ScopeProfilesSlice) RemoveIf(f func(ScopeProfiles) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteScopeProfiles((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es ScopeProfilesSlice) CopyTo(dest ScopeProfilesSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyScopeProfilesPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the ScopeProfiles elements within ScopeProfilesSlice given the
+// provided less function so that two instances of ScopeProfilesSlice
+// can be compared.
+func (es ScopeProfilesSlice) Sort(less func(a, b ScopeProfiles) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stack.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stack.go
new file mode 100644
index 000000000..53e4352c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stack.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// Stack represents a stack trace as a list of locations.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewStack function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Stack struct {
+ orig *internal.Stack
+ state *internal.State
+}
+
+func newStack(orig *internal.Stack, state *internal.State) Stack {
+ return Stack{orig: orig, state: state}
+}
+
+// NewStack creates a new empty Stack.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewStack() Stack {
+ return newStack(internal.NewStack(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Stack) MoveTo(dest Stack) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteStack(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// LocationIndices returns the LocationIndices associated with this Stack.
+func (ms Stack) LocationIndices() pcommon.Int32Slice {
+ return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.LocationIndices, ms.state))
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Stack) CopyTo(dest Stack) {
+ dest.state.AssertMutable()
+ internal.CopyStack(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stackslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stackslice.go
new file mode 100644
index 000000000..4df19fb68
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_stackslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// StackSlice logically represents a slice of Stack.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewStackSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type StackSlice struct {
+ orig *[]*internal.Stack
+ state *internal.State
+}
+
+func newStackSlice(orig *[]*internal.Stack, state *internal.State) StackSlice {
+ return StackSlice{orig: orig, state: state}
+}
+
+// NewStackSlice creates a StackSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewStackSlice() StackSlice {
+ orig := []*internal.Stack(nil)
+ return newStackSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewStackSlice()".
+func (es StackSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es StackSlice) At(i int) Stack {
+ return newStack((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es StackSlice) All() iter.Seq2[int, Stack] {
+ return func(yield func(int, Stack) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new StackSlice can be initialized:
+//
+// es := NewStackSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es StackSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.Stack, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Stack.
+// It returns the newly added Stack.
+func (es StackSlice) AppendEmpty() Stack {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewStack())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es StackSlice) MoveAndAppendTo(dest StackSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es StackSlice) RemoveIf(f func(Stack) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteStack((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es StackSlice) CopyTo(dest StackSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyStackPtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the Stack elements within StackSlice given the
+// provided less function so that two instances of StackSlice
+// can be compared.
+func (es StackSlice) Sort(less func(a, b Stack) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go
new file mode 100644
index 000000000..b994c9505
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go
@@ -0,0 +1,76 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ValueType describes the type and units of a value.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewValueType function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ValueType struct {
+ orig *internal.ValueType
+ state *internal.State
+}
+
+func newValueType(orig *internal.ValueType, state *internal.State) ValueType {
+ return ValueType{orig: orig, state: state}
+}
+
+// NewValueType creates a new empty ValueType.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewValueType() ValueType {
+ return newValueType(internal.NewValueType(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms ValueType) MoveTo(dest ValueType) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteValueType(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// TypeStrindex returns the typestrindex associated with this ValueType.
+func (ms ValueType) TypeStrindex() int32 {
+ return ms.orig.TypeStrindex
+}
+
+// SetTypeStrindex replaces the typestrindex associated with this ValueType.
+func (ms ValueType) SetTypeStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.TypeStrindex = v
+}
+
+// UnitStrindex returns the unitstrindex associated with this ValueType.
+func (ms ValueType) UnitStrindex() int32 {
+ return ms.orig.UnitStrindex
+}
+
+// SetUnitStrindex replaces the unitstrindex associated with this ValueType.
+func (ms ValueType) SetUnitStrindex(v int32) {
+ ms.state.AssertMutable()
+ ms.orig.UnitStrindex = v
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms ValueType) CopyTo(dest ValueType) {
+ dest.state.AssertMutable()
+ internal.CopyValueType(dest.orig, ms.orig)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go
new file mode 100644
index 000000000..aa53a9707
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go
@@ -0,0 +1,163 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pprofile
+
+import (
+ "iter"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ValueTypeSlice logically represents a slice of ValueType.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewValueTypeSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ValueTypeSlice struct {
+ orig *[]*internal.ValueType
+ state *internal.State
+}
+
+func newValueTypeSlice(orig *[]*internal.ValueType, state *internal.State) ValueTypeSlice {
+ return ValueTypeSlice{orig: orig, state: state}
+}
+
+// NewValueTypeSlice creates a ValueTypeSliceWrapper with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewValueTypeSlice() ValueTypeSlice {
+ orig := []*internal.ValueType(nil)
+ return newValueTypeSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a newly instance created with "NewValueTypeSlice()".
+func (es ValueTypeSlice) Len() int {
+ return len(*es.orig)
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es ValueTypeSlice) At(i int) ValueType {
+ return newValueType((*es.orig)[i], es.state)
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ValueTypeSlice) All() iter.Seq2[int, ValueType] {
+ return func(yield func(int, ValueType) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If the newCap <= cap then no change in capacity.
+// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
+//
+// Here is how a new ValueTypeSlice can be initialized:
+//
+// es := NewValueTypeSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es ValueTypeSlice) EnsureCapacity(newCap int) {
+ es.state.AssertMutable()
+ oldCap := cap(*es.orig)
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]*internal.ValueType, len(*es.orig), newCap)
+ copy(newOrig, *es.orig)
+ *es.orig = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty ValueType.
+// It returns the newly added ValueType.
+func (es ValueTypeSlice) AppendEmpty() ValueType {
+ es.state.AssertMutable()
+ *es.orig = append(*es.orig, internal.NewValueType())
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es ValueTypeSlice) MoveAndAppendTo(dest ValueTypeSlice) {
+ es.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.orig == dest.orig {
+ return
+ }
+ if *dest.orig == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.orig = *es.orig
+ } else {
+ *dest.orig = append(*dest.orig, *es.orig...)
+ }
+ *es.orig = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es ValueTypeSlice) RemoveIf(f func(ValueType) bool) {
+ es.state.AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.orig); i++ {
+ if f(es.At(i)) {
+ internal.DeleteValueType((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
+ newLen++
+ }
+ *es.orig = (*es.orig)[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es ValueTypeSlice) CopyTo(dest ValueTypeSlice) {
+ dest.state.AssertMutable()
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyValueTypePtrSlice(*dest.orig, *es.orig)
+}
+
+// Sort sorts the ValueType elements within ValueTypeSlice given the
+// provided less function so that two instances of ValueTypeSlice
+// can be compared.
+func (es ValueTypeSlice) Sort(less func(a, b ValueType) bool) {
+ es.state.AssertMutable()
+ sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go
new file mode 100644
index 000000000..8b3fa3fac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "slices"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/otlp"
+)
+
+// JSONMarshaler marshals pprofile.Profiles to JSON bytes using the OTLP/JSON format.
+type JSONMarshaler struct{}
+
+// MarshalProfiles to the OTLP/JSON format.
+func (*JSONMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) {
+ dest := json.BorrowStream(nil)
+ defer json.ReturnStream(dest)
+ pd.getOrig().MarshalJSON(dest)
+ if dest.Error() != nil {
+ return nil, dest.Error()
+ }
+ return slices.Clone(dest.Buffer()), nil
+}
+
+// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pprofile.Profiles.
+type JSONUnmarshaler struct{}
+
+// UnmarshalProfiles from OTLP/JSON format into pprofile.Profiles.
+func (*JSONUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) {
+ iter := json.BorrowIterator(buf)
+ defer json.ReturnIterator(iter)
+ pd := NewProfiles()
+ pd.getOrig().UnmarshalJSON(iter)
+ if iter.Error() != nil {
+ return Profiles{}, iter.Error()
+ }
+ otlp.MigrateProfiles(pd.getOrig().ResourceProfiles)
+ return pd, nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/keyvalueandunit.go
new file mode 100644
index 000000000..3405074ba
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/keyvalueandunit.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// Equal checks equality with another KeyValueAndUnit
+// It assumes both structs refer to the same dictionary.
+func (ms KeyValueAndUnit) Equal(val KeyValueAndUnit) bool {
+ return ms.KeyStrindex() == val.KeyStrindex() &&
+ ms.UnitStrindex() == val.UnitStrindex() &&
+ ms.Value().Equal(val.Value())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/line.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/line.go
new file mode 100644
index 000000000..ae97b8f83
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/line.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// Equal checks equality with another LineSlice
+func (l LineSlice) Equal(val LineSlice) bool {
+ if l.Len() != val.Len() {
+ return false
+ }
+
+ for i := range l.Len() {
+ if !l.At(i).Equal(val.At(i)) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Equal checks equality with another Line
+func (l Line) Equal(val Line) bool {
+ return l.Column() == val.Column() &&
+ l.FunctionIndex() == val.FunctionIndex() &&
+ l.Line() == val.Line()
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/link.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/link.go
new file mode 100644
index 000000000..e7c27b98a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/link.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// Equal checks equality with another Link
+func (ms Link) Equal(val Link) bool {
+ return ms.TraceID() == val.TraceID() &&
+ ms.SpanID() == val.SpanID()
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/links.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/links.go
new file mode 100644
index 000000000..13a9192a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/links.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "errors"
+ "math"
+)
+
+var errTooManyLinkTableEntries = errors.New("too many entries in LinkTable")
+
+// SetLink updates a LinkTable, adding or providing a value and returns its
+// index.
+func SetLink(table LinkSlice, li Link) (int32, error) {
+ for j, l := range table.All() {
+ if l.Equal(li) {
+ if j > math.MaxInt32 {
+ return 0, errTooManyLinkTableEntries
+ }
+ return int32(j), nil //nolint:gosec // G115 overflow checked
+ }
+ }
+
+ if table.Len() >= math.MaxInt32 {
+ return 0, errTooManyLinkTableEntries
+ }
+
+ li.CopyTo(table.AppendEmpty())
+ return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/location.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/location.go
new file mode 100644
index 000000000..65ea38fac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/location.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// Equal checks equality with another Location
+func (ms Location) Equal(val Location) bool {
+ return ms.MappingIndex() == val.MappingIndex() &&
+ ms.Address() == val.Address() &&
+ ms.AttributeIndices().Equal(val.AttributeIndices()) &&
+ ms.Lines().Equal(val.Lines())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/locations.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/locations.go
new file mode 100644
index 000000000..d64334b1f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/locations.go
@@ -0,0 +1,45 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "errors"
+ "math"
+)
+
+// FromLocationIndices builds a slice containing all the locations of a Stack.
+// Updates made to the returned map will not be applied back to the Stack.
+func FromLocationIndices(table LocationSlice, record Stack) LocationSlice {
+ m := NewLocationSlice()
+ m.EnsureCapacity(record.LocationIndices().Len())
+
+ for _, idx := range record.LocationIndices().All() {
+ l := table.At(int(idx))
+ l.CopyTo(m.AppendEmpty())
+ }
+
+ return m
+}
+
+var errTooManyLocationTableEntries = errors.New("too many entries in LocationTable")
+
+// SetLocation updates a LocationTable, adding or providing a value and returns
+// its index.
+func SetLocation(table LocationSlice, loc Location) (int32, error) {
+ for j, a := range table.All() {
+ if a.Equal(loc) {
+ if j > math.MaxInt32 {
+ return 0, errTooManyLocationTableEntries
+ }
+ return int32(j), nil //nolint:gosec // G115 overflow checked
+ }
+ }
+
+ if table.Len() >= math.MaxInt32 {
+ return 0, errTooManyLocationTableEntries
+ }
+
+ loc.CopyTo(table.AppendEmpty())
+ return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/mapping.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mapping.go
new file mode 100644
index 000000000..238a54e3f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mapping.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// Equal checks equality with another Mapping
+func (ms Mapping) Equal(val Mapping) bool {
+ return ms.MemoryStart() == val.MemoryStart() &&
+ ms.MemoryLimit() == val.MemoryLimit() &&
+ ms.FileOffset() == val.FileOffset() &&
+ ms.FilenameStrindex() == val.FilenameStrindex() &&
+ ms.AttributeIndices().Equal(val.AttributeIndices())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/mappings.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mappings.go
new file mode 100644
index 000000000..34023f34a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/mappings.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "errors"
+ "math"
+)
+
+var errTooManyMappingTableEntries = errors.New("too many entries in MappingTable")
+
+// SetMapping updates a MappingTable, adding or providing a value and returns
+// its index.
+func SetMapping(table MappingSlice, ma Mapping) (int32, error) {
+ for j, m := range table.All() {
+ if m.Equal(ma) {
+ if j > math.MaxInt32 {
+ return 0, errTooManyMappingTableEntries
+ }
+ return int32(j), nil //nolint:gosec // G115 overflow checked
+ }
+ }
+
+ if table.Len() >= math.MaxInt32 {
+ return 0, errTooManyMappingTableEntries
+ }
+
+ ma.CopyTo(table.AppendEmpty())
+ return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/metadata.yaml b/vendor/go.opentelemetry.io/collector/pdata/pprofile/metadata.yaml
new file mode 100644
index 000000000..21948a68d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/metadata.yaml
@@ -0,0 +1,12 @@
+type: pprofile
+github_project: open-telemetry/opentelemetry-collector
+
+status:
+ disable_codecov_badge: true
+ class: pdata
+ codeowners:
+ active:
+ - mx-psi
+ - dmathieu
+ stability:
+ development: [profiles]
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go
new file mode 100644
index 000000000..58f4f1794
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+var _ MarshalSizer = (*ProtoMarshaler)(nil)
+
+type ProtoMarshaler struct{}
+
+func (e *ProtoMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) {
+ size := pd.getOrig().SizeProto()
+ buf := make([]byte, size)
+ _ = pd.getOrig().MarshalProto(buf)
+ return buf, nil
+}
+
+func (e *ProtoMarshaler) ProfilesSize(pd Profiles) int {
+ return pd.getOrig().SizeProto()
+}
+
+func (e *ProtoMarshaler) ResourceProfilesSize(pd ResourceProfiles) int {
+ return pd.orig.SizeProto()
+}
+
+func (e *ProtoMarshaler) ScopeProfilesSize(pd ScopeProfiles) int {
+ return pd.orig.SizeProto()
+}
+
+func (e *ProtoMarshaler) ProfileSize(pd Profile) int {
+ return pd.orig.SizeProto()
+}
+
+type ProtoUnmarshaler struct{}
+
+func (d *ProtoUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) {
+ pd := NewProfiles()
+ err := pd.getOrig().UnmarshalProto(buf)
+ if err != nil {
+ return Profiles{}, err
+ }
+ return pd, nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go
new file mode 100644
index 000000000..35c411ea7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "encoding/hex"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+var emptyProfileID = ProfileID([16]byte{})
+
+// ProfileID is a profile identifier.
+type ProfileID [16]byte
+
+// NewProfileIDEmpty returns a new empty (all zero bytes) ProfileID.
+func NewProfileIDEmpty() ProfileID {
+ return emptyProfileID
+}
+
+// String returns string representation of the ProfileID.
+//
+// Important: Don't rely on this method to get a string identifier of ProfileID.
+// Use hex.EncodeToString explicitly instead.
+// This method is meant to implement Stringer interface for display purposes only.
+func (ms ProfileID) String() string {
+ if ms.IsEmpty() {
+ return ""
+ }
+ return hex.EncodeToString(ms[:])
+}
+
+// IsEmpty returns true if id doesn't contain at least one non-zero byte.
+func (ms ProfileID) IsEmpty() bool {
+ return internal.ProfileID(ms).IsEmpty()
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go
new file mode 100644
index 000000000..f6d50148b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// MarkReadOnly marks the Profiles as shared so that no further modifications can be done on it.
+func (ms Profiles) MarkReadOnly() {
+ ms.getState().MarkReadOnly()
+}
+
+// IsReadOnly returns true if this Profiles instance is read-only.
+func (ms Profiles) IsReadOnly() bool {
+ return ms.getState().IsReadOnly()
+}
+
+// SampleCount calculates the total number of samples.
+func (ms Profiles) SampleCount() int {
+ sampleCount := 0
+ rps := ms.ResourceProfiles()
+ for i := 0; i < rps.Len(); i++ {
+ rp := rps.At(i)
+ sps := rp.ScopeProfiles()
+ for j := 0; j < sps.Len(); j++ {
+ pcs := sps.At(j).Profiles()
+ for k := 0; k < pcs.Len(); k++ {
+ sampleCount += pcs.At(k).Samples().Len()
+ }
+ }
+ }
+ return sampleCount
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/stack.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stack.go
new file mode 100644
index 000000000..3629b0ee9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stack.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+// Equal checks equality with another Stack
+func (ms Stack) Equal(val Stack) bool {
+ if ms.LocationIndices().Len() != val.LocationIndices().Len() {
+ return false
+ }
+
+ for i := range ms.LocationIndices().Len() {
+ if ms.LocationIndices().At(i) != val.LocationIndices().At(i) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/stacks.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stacks.go
new file mode 100644
index 000000000..762429d79
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/stacks.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "errors"
+ "math"
+)
+
+var errTooManyStackTableEntries = errors.New("too many entries in StackTable")
+
+// SetStack updates a StackSlice, adding or providing a stack and returns its
+// index.
+func SetStack(table StackSlice, st Stack) (int32, error) {
+ for j, l := range table.All() {
+ if l.Equal(st) {
+ if j > math.MaxInt32 {
+ return 0, errTooManyStackTableEntries
+ }
+ return int32(j), nil //nolint:gosec // G115 overflow checked
+ }
+ }
+
+ if table.Len() >= math.MaxInt32 {
+ return 0, errTooManyStackTableEntries
+ }
+
+ st.CopyTo(table.AppendEmpty())
+ return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/string_table.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/string_table.go
new file mode 100644
index 000000000..3c1ffb304
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/string_table.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
+
+import (
+ "errors"
+ "math"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+var errTooManyStringTableEntries = errors.New("too many entries in StringTable")
+
+// SetString updates a StringTable, adding or providing a value and returns its index.
+func SetString(table pcommon.StringSlice, val string) (int32, error) {
+ for j, v := range table.All() {
+ if v == val {
+ if j > math.MaxInt32 {
+ return 0, errTooManyStringTableEntries
+ }
+ // Return the index of the existing value.
+ return int32(j), nil //nolint:gosec // G115 overflow checked
+ }
+ }
+
+ if table.Len() >= math.MaxInt32 {
+ return 0, errTooManyStringTableEntries
+ }
+
+ table.Append(val)
+ return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go
index 7db68c6ef..c68406cad 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go
@@ -8,7 +8,6 @@ package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewResourceSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpans struct {
- orig *otlptrace.ResourceSpans
+ orig *internal.ResourceSpans
state *internal.State
}
-func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) ResourceSpans {
+func newResourceSpans(orig *internal.ResourceSpans, state *internal.State) ResourceSpans {
return ResourceSpans{orig: orig, state: state}
}
@@ -33,7 +32,7 @@ func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) Reso
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceSpans() ResourceSpans {
- return newResourceSpans(internal.NewOrigResourceSpans(), internal.NewState())
+ return newResourceSpans(internal.NewResourceSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,13 +44,13 @@ func (ms ResourceSpans) MoveTo(dest ResourceSpans) {
if ms.orig == dest.orig {
return
}
- internal.DeleteOrigResourceSpans(dest.orig, false)
+ internal.DeleteResourceSpans(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceSpans.
func (ms ResourceSpans) Resource() pcommon.Resource {
- return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state))
+ return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeSpans returns the ScopeSpans associated with this ResourceSpans.
@@ -73,5 +72,5 @@ func (ms ResourceSpans) SetSchemaUrl(v string) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceSpans) CopyTo(dest ResourceSpans) {
dest.state.AssertMutable()
- internal.CopyOrigResourceSpans(dest.orig, ms.orig)
+ internal.CopyResourceSpans(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
index 565f413dd..ed2a7a872 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// ResourceSpansSlice logically represents a slice of ResourceSpans.
@@ -22,18 +21,18 @@ import (
// Must use NewResourceSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpansSlice struct {
- orig *[]*otlptrace.ResourceSpans
+ orig *[]*internal.ResourceSpans
state *internal.State
}
-func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans, state *internal.State) ResourceSpansSlice {
+func newResourceSpansSlice(orig *[]*internal.ResourceSpans, state *internal.State) ResourceSpansSlice {
return ResourceSpansSlice{orig: orig, state: state}
}
-// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements.
+// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceSpansSlice() ResourceSpansSlice {
- orig := []*otlptrace.ResourceSpans(nil)
+ orig := []*internal.ResourceSpans(nil)
return newResourceSpansSlice(&orig, internal.NewState())
}
@@ -90,7 +89,7 @@ func (es ResourceSpansSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.ResourceSpans, len(*es.orig), newCap)
+ newOrig := make([]*internal.ResourceSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -99,7 +98,7 @@ func (es ResourceSpansSlice) EnsureCapacity(newCap int) {
// It returns the newly added ResourceSpans.
func (es ResourceSpansSlice) AppendEmpty() ResourceSpans {
es.state.AssertMutable()
- *es.orig = append(*es.orig, internal.NewOrigResourceSpans())
+ *es.orig = append(*es.orig, internal.NewResourceSpans())
return es.At(es.Len() - 1)
}
@@ -128,7 +127,7 @@ func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
- internal.DeleteOrigResourceSpans((*es.orig)[i], true)
+ internal.DeleteResourceSpans((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
@@ -152,7 +151,7 @@ func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) {
if es.orig == dest.orig {
return
}
- *dest.orig = internal.CopyOrigResourceSpansSlice(*dest.orig, *es.orig)
+ *dest.orig = internal.CopyResourceSpansPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceSpans elements within ResourceSpansSlice given the
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go
index 5ea42e985..a06c1df34 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go
@@ -8,7 +8,6 @@ package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewScopeSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpans struct {
- orig *otlptrace.ScopeSpans
+ orig *internal.ScopeSpans
state *internal.State
}
-func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans {
+func newScopeSpans(orig *internal.ScopeSpans, state *internal.State) ScopeSpans {
return ScopeSpans{orig: orig, state: state}
}
@@ -33,7 +32,7 @@ func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeSpans() ScopeSpans {
- return newScopeSpans(internal.NewOrigScopeSpans(), internal.NewState())
+ return newScopeSpans(internal.NewScopeSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,13 +44,13 @@ func (ms ScopeSpans) MoveTo(dest ScopeSpans) {
if ms.orig == dest.orig {
return
}
- internal.DeleteOrigScopeSpans(dest.orig, false)
+ internal.DeleteScopeSpans(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeSpans.
func (ms ScopeSpans) Scope() pcommon.InstrumentationScope {
- return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state))
+ return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// Spans returns the Spans associated with this ScopeSpans.
@@ -73,5 +72,5 @@ func (ms ScopeSpans) SetSchemaUrl(v string) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeSpans) CopyTo(dest ScopeSpans) {
dest.state.AssertMutable()
- internal.CopyOrigScopeSpans(dest.orig, ms.orig)
+ internal.CopyScopeSpans(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
index 8e2fbf657..2522d5b2b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// ScopeSpansSlice logically represents a slice of ScopeSpans.
@@ -22,18 +21,18 @@ import (
// Must use NewScopeSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpansSlice struct {
- orig *[]*otlptrace.ScopeSpans
+ orig *[]*internal.ScopeSpans
state *internal.State
}
-func newScopeSpansSlice(orig *[]*otlptrace.ScopeSpans, state *internal.State) ScopeSpansSlice {
+func newScopeSpansSlice(orig *[]*internal.ScopeSpans, state *internal.State) ScopeSpansSlice {
return ScopeSpansSlice{orig: orig, state: state}
}
-// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements.
+// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeSpansSlice() ScopeSpansSlice {
- orig := []*otlptrace.ScopeSpans(nil)
+ orig := []*internal.ScopeSpans(nil)
return newScopeSpansSlice(&orig, internal.NewState())
}
@@ -90,7 +89,7 @@ func (es ScopeSpansSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.ScopeSpans, len(*es.orig), newCap)
+ newOrig := make([]*internal.ScopeSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -99,7 +98,7 @@ func (es ScopeSpansSlice) EnsureCapacity(newCap int) {
// It returns the newly added ScopeSpans.
func (es ScopeSpansSlice) AppendEmpty() ScopeSpans {
es.state.AssertMutable()
- *es.orig = append(*es.orig, internal.NewOrigScopeSpans())
+ *es.orig = append(*es.orig, internal.NewScopeSpans())
return es.At(es.Len() - 1)
}
@@ -128,7 +127,7 @@ func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
- internal.DeleteOrigScopeSpans((*es.orig)[i], true)
+ internal.DeleteScopeSpans((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
@@ -152,7 +151,7 @@ func (es ScopeSpansSlice) CopyTo(dest ScopeSpansSlice) {
if es.orig == dest.orig {
return
}
- *dest.orig = internal.CopyOrigScopeSpansSlice(*dest.orig, *es.orig)
+ *dest.orig = internal.CopyScopeSpansPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeSpans elements within ScopeSpansSlice given the
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go
index 433487d41..238386c17 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go
@@ -8,8 +8,6 @@ package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -22,11 +20,11 @@ import (
// Must use NewSpan function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Span struct {
- orig *otlptrace.Span
+ orig *internal.Span
state *internal.State
}
-func newSpan(orig *otlptrace.Span, state *internal.State) Span {
+func newSpan(orig *internal.Span, state *internal.State) Span {
return Span{orig: orig, state: state}
}
@@ -35,7 +33,7 @@ func newSpan(orig *otlptrace.Span, state *internal.State) Span {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpan() Span {
- return newSpan(internal.NewOrigSpan(), internal.NewState())
+ return newSpan(internal.NewSpan(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -47,7 +45,7 @@ func (ms Span) MoveTo(dest Span) {
if ms.orig == dest.orig {
return
}
- internal.DeleteOrigSpan(dest.orig, false)
+ internal.DeleteSpan(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
@@ -59,7 +57,7 @@ func (ms Span) TraceID() pcommon.TraceID {
// SetTraceID replaces the traceid associated with this Span.
func (ms Span) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
- ms.orig.TraceId = data.TraceID(v)
+ ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this Span.
@@ -70,12 +68,12 @@ func (ms Span) SpanID() pcommon.SpanID {
// SetSpanID replaces the spanid associated with this Span.
func (ms Span) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.SpanId = data.SpanID(v)
+ ms.orig.SpanId = internal.SpanID(v)
}
// TraceState returns the tracestate associated with this Span.
func (ms Span) TraceState() pcommon.TraceState {
- return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state))
+ return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// ParentSpanID returns the parentspanid associated with this Span.
@@ -86,7 +84,7 @@ func (ms Span) ParentSpanID() pcommon.SpanID {
// SetParentSpanID replaces the parentspanid associated with this Span.
func (ms Span) SetParentSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.ParentSpanId = data.SpanID(v)
+ ms.orig.ParentSpanId = internal.SpanID(v)
}
// Flags returns the flags associated with this Span.
@@ -119,7 +117,7 @@ func (ms Span) Kind() SpanKind {
// SetKind replaces the kind associated with this Span.
func (ms Span) SetKind(v SpanKind) {
ms.state.AssertMutable()
- ms.orig.Kind = otlptrace.Span_SpanKind(v)
+ ms.orig.Kind = internal.SpanKind(v)
}
// StartTimestamp returns the starttimestamp associated with this Span.
@@ -146,7 +144,7 @@ func (ms Span) SetEndTimestamp(v pcommon.Timestamp) {
// Attributes returns the Attributes associated with this Span.
func (ms Span) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Span.
@@ -200,5 +198,5 @@ func (ms Span) Status() Status {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Span) CopyTo(dest Span) {
dest.state.AssertMutable()
- internal.CopyOrigSpan(dest.orig, ms.orig)
+ internal.CopySpan(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go
index f474aca80..333f5d47b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go
@@ -8,7 +8,6 @@ package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -21,11 +20,11 @@ import (
// Must use NewSpanEvent function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEvent struct {
- orig *otlptrace.Span_Event
+ orig *internal.SpanEvent
state *internal.State
}
-func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent {
+func newSpanEvent(orig *internal.SpanEvent, state *internal.State) SpanEvent {
return SpanEvent{orig: orig, state: state}
}
@@ -34,7 +33,7 @@ func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpanEvent() SpanEvent {
- return newSpanEvent(internal.NewOrigSpan_Event(), internal.NewState())
+ return newSpanEvent(internal.NewSpanEvent(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,7 +45,7 @@ func (ms SpanEvent) MoveTo(dest SpanEvent) {
if ms.orig == dest.orig {
return
}
- internal.DeleteOrigSpan_Event(dest.orig, false)
+ internal.DeleteSpanEvent(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
@@ -74,7 +73,7 @@ func (ms SpanEvent) SetName(v string) {
// Attributes returns the Attributes associated with this SpanEvent.
func (ms SpanEvent) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent.
@@ -91,5 +90,5 @@ func (ms SpanEvent) SetDroppedAttributesCount(v uint32) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SpanEvent) CopyTo(dest SpanEvent) {
dest.state.AssertMutable()
- internal.CopyOrigSpan_Event(dest.orig, ms.orig)
+ internal.CopySpanEvent(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
index c0da19167..f21a4d671 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanEventSlice logically represents a slice of SpanEvent.
@@ -22,18 +21,18 @@ import (
// Must use NewSpanEventSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEventSlice struct {
- orig *[]*otlptrace.Span_Event
+ orig *[]*internal.SpanEvent
state *internal.State
}
-func newSpanEventSlice(orig *[]*otlptrace.Span_Event, state *internal.State) SpanEventSlice {
+func newSpanEventSlice(orig *[]*internal.SpanEvent, state *internal.State) SpanEventSlice {
return SpanEventSlice{orig: orig, state: state}
}
-// NewSpanEventSlice creates a SpanEventSlice with 0 elements.
+// NewSpanEventSlice creates a SpanEventSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanEventSlice() SpanEventSlice {
- orig := []*otlptrace.Span_Event(nil)
+ orig := []*internal.SpanEvent(nil)
return newSpanEventSlice(&orig, internal.NewState())
}
@@ -90,7 +89,7 @@ func (es SpanEventSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.Span_Event, len(*es.orig), newCap)
+ newOrig := make([]*internal.SpanEvent, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -99,7 +98,7 @@ func (es SpanEventSlice) EnsureCapacity(newCap int) {
// It returns the newly added SpanEvent.
func (es SpanEventSlice) AppendEmpty() SpanEvent {
es.state.AssertMutable()
- *es.orig = append(*es.orig, internal.NewOrigSpan_Event())
+ *es.orig = append(*es.orig, internal.NewSpanEvent())
return es.At(es.Len() - 1)
}
@@ -128,7 +127,7 @@ func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
- internal.DeleteOrigSpan_Event((*es.orig)[i], true)
+ internal.DeleteSpanEvent((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
@@ -152,7 +151,7 @@ func (es SpanEventSlice) CopyTo(dest SpanEventSlice) {
if es.orig == dest.orig {
return
}
- *dest.orig = internal.CopyOrigSpan_EventSlice(*dest.orig, *es.orig)
+ *dest.orig = internal.CopySpanEventPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanEvent elements within SpanEventSlice given the
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go
index cfe1caaaf..ab585857a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go
@@ -8,8 +8,6 @@ package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -23,11 +21,11 @@ import (
// Must use NewSpanLink function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLink struct {
- orig *otlptrace.Span_Link
+ orig *internal.SpanLink
state *internal.State
}
-func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink {
+func newSpanLink(orig *internal.SpanLink, state *internal.State) SpanLink {
return SpanLink{orig: orig, state: state}
}
@@ -36,7 +34,7 @@ func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpanLink() SpanLink {
- return newSpanLink(internal.NewOrigSpan_Link(), internal.NewState())
+ return newSpanLink(internal.NewSpanLink(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -48,7 +46,7 @@ func (ms SpanLink) MoveTo(dest SpanLink) {
if ms.orig == dest.orig {
return
}
- internal.DeleteOrigSpan_Link(dest.orig, false)
+ internal.DeleteSpanLink(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
@@ -60,7 +58,7 @@ func (ms SpanLink) TraceID() pcommon.TraceID {
// SetTraceID replaces the traceid associated with this SpanLink.
func (ms SpanLink) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
- ms.orig.TraceId = data.TraceID(v)
+ ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this SpanLink.
@@ -71,17 +69,17 @@ func (ms SpanLink) SpanID() pcommon.SpanID {
// SetSpanID replaces the spanid associated with this SpanLink.
func (ms SpanLink) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.SpanId = data.SpanID(v)
+ ms.orig.SpanId = internal.SpanID(v)
}
// TraceState returns the tracestate associated with this SpanLink.
func (ms SpanLink) TraceState() pcommon.TraceState {
- return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state))
+ return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// Attributes returns the Attributes associated with this SpanLink.
func (ms SpanLink) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this SpanLink.
@@ -109,5 +107,5 @@ func (ms SpanLink) SetFlags(v uint32) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SpanLink) CopyTo(dest SpanLink) {
dest.state.AssertMutable()
- internal.CopyOrigSpan_Link(dest.orig, ms.orig)
+ internal.CopySpanLink(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
index a4626b3f1..65b8b4735 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanLinkSlice logically represents a slice of SpanLink.
@@ -22,18 +21,18 @@ import (
// Must use NewSpanLinkSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLinkSlice struct {
- orig *[]*otlptrace.Span_Link
+ orig *[]*internal.SpanLink
state *internal.State
}
-func newSpanLinkSlice(orig *[]*otlptrace.Span_Link, state *internal.State) SpanLinkSlice {
+func newSpanLinkSlice(orig *[]*internal.SpanLink, state *internal.State) SpanLinkSlice {
return SpanLinkSlice{orig: orig, state: state}
}
-// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.
+// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanLinkSlice() SpanLinkSlice {
- orig := []*otlptrace.Span_Link(nil)
+ orig := []*internal.SpanLink(nil)
return newSpanLinkSlice(&orig, internal.NewState())
}
@@ -90,7 +89,7 @@ func (es SpanLinkSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.Span_Link, len(*es.orig), newCap)
+ newOrig := make([]*internal.SpanLink, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -99,7 +98,7 @@ func (es SpanLinkSlice) EnsureCapacity(newCap int) {
// It returns the newly added SpanLink.
func (es SpanLinkSlice) AppendEmpty() SpanLink {
es.state.AssertMutable()
- *es.orig = append(*es.orig, internal.NewOrigSpan_Link())
+ *es.orig = append(*es.orig, internal.NewSpanLink())
return es.At(es.Len() - 1)
}
@@ -128,7 +127,7 @@ func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
- internal.DeleteOrigSpan_Link((*es.orig)[i], true)
+ internal.DeleteSpanLink((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
@@ -152,7 +151,7 @@ func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) {
if es.orig == dest.orig {
return
}
- *dest.orig = internal.CopyOrigSpan_LinkSlice(*dest.orig, *es.orig)
+ *dest.orig = internal.CopySpanLinkPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanLink elements within SpanLinkSlice given the
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
index 7417966fd..9cbc68506 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanSlice logically represents a slice of Span.
@@ -22,18 +21,18 @@ import (
// Must use NewSpanSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanSlice struct {
- orig *[]*otlptrace.Span
+ orig *[]*internal.Span
state *internal.State
}
-func newSpanSlice(orig *[]*otlptrace.Span, state *internal.State) SpanSlice {
+func newSpanSlice(orig *[]*internal.Span, state *internal.State) SpanSlice {
return SpanSlice{orig: orig, state: state}
}
-// NewSpanSlice creates a SpanSlice with 0 elements.
+// NewSpanSlice creates a SpanSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanSlice() SpanSlice {
- orig := []*otlptrace.Span(nil)
+ orig := []*internal.Span(nil)
return newSpanSlice(&orig, internal.NewState())
}
@@ -90,7 +89,7 @@ func (es SpanSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.Span, len(*es.orig), newCap)
+ newOrig := make([]*internal.Span, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -99,7 +98,7 @@ func (es SpanSlice) EnsureCapacity(newCap int) {
// It returns the newly added Span.
func (es SpanSlice) AppendEmpty() Span {
es.state.AssertMutable()
- *es.orig = append(*es.orig, internal.NewOrigSpan())
+ *es.orig = append(*es.orig, internal.NewSpan())
return es.At(es.Len() - 1)
}
@@ -128,7 +127,7 @@ func (es SpanSlice) RemoveIf(f func(Span) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
- internal.DeleteOrigSpan((*es.orig)[i], true)
+ internal.DeleteSpan((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
@@ -152,7 +151,7 @@ func (es SpanSlice) CopyTo(dest SpanSlice) {
if es.orig == dest.orig {
return
}
- *dest.orig = internal.CopyOrigSpanSlice(*dest.orig, *es.orig)
+ *dest.orig = internal.CopySpanPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Span elements within SpanSlice given the
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
index e46b45f31..da1a82726 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
@@ -8,7 +8,6 @@ package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// Status is an optional final status for this span. Semantically, when Status was not
@@ -20,11 +19,11 @@ import (
// Must use NewStatus function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Status struct {
- orig *otlptrace.Status
+ orig *internal.Status
state *internal.State
}
-func newStatus(orig *otlptrace.Status, state *internal.State) Status {
+func newStatus(orig *internal.Status, state *internal.State) Status {
return Status{orig: orig, state: state}
}
@@ -33,7 +32,7 @@ func newStatus(orig *otlptrace.Status, state *internal.State) Status {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewStatus() Status {
- return newStatus(internal.NewOrigStatus(), internal.NewState())
+ return newStatus(internal.NewStatus(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,7 +44,7 @@ func (ms Status) MoveTo(dest Status) {
if ms.orig == dest.orig {
return
}
- internal.DeleteOrigStatus(dest.orig, false)
+ internal.DeleteStatus(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
@@ -68,11 +67,11 @@ func (ms Status) Code() StatusCode {
// SetCode replaces the code associated with this Status.
func (ms Status) SetCode(v StatusCode) {
ms.state.AssertMutable()
- ms.orig.Code = otlptrace.Status_StatusCode(v)
+ ms.orig.Code = internal.StatusCode(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Status) CopyTo(dest Status) {
dest.state.AssertMutable()
- internal.CopyOrigStatus(dest.orig, ms.orig)
+ internal.CopyStatus(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go
index 4334e8c79..5e69bc03f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go
@@ -8,7 +8,6 @@ package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
)
// Traces is the top-level struct that is propagated through the traces pipeline.
@@ -19,10 +18,10 @@ import (
//
// Must use NewTraces function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Traces internal.Traces
+type Traces internal.TracesWrapper
-func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *internal.State) Traces {
- return Traces(internal.NewTraces(orig, state))
+func newTraces(orig *internal.ExportTraceServiceRequest, state *internal.State) Traces {
+ return Traces(internal.NewTracesWrapper(orig, state))
}
// NewTraces creates a new empty Traces.
@@ -30,7 +29,7 @@ func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *intern
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewTraces() Traces {
- return newTraces(internal.NewOrigExportTraceServiceRequest(), internal.NewState())
+ return newTraces(internal.NewExportTraceServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -42,7 +41,7 @@ func (ms Traces) MoveTo(dest Traces) {
if ms.getOrig() == dest.getOrig() {
return
}
- internal.DeleteOrigExportTraceServiceRequest(dest.getOrig(), false)
+ internal.DeleteExportTraceServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
@@ -54,13 +53,13 @@ func (ms Traces) ResourceSpans() ResourceSpansSlice {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Traces) CopyTo(dest Traces) {
dest.getState().AssertMutable()
- internal.CopyOrigExportTraceServiceRequest(dest.getOrig(), ms.getOrig())
+ internal.CopyExportTraceServiceRequest(dest.getOrig(), ms.getOrig())
}
-func (ms Traces) getOrig() *otlpcollectortrace.ExportTraceServiceRequest {
- return internal.GetOrigTraces(internal.Traces(ms))
+func (ms Traces) getOrig() *internal.ExportTraceServiceRequest {
+ return internal.GetTracesOrig(internal.TracesWrapper(ms))
}
func (ms Traces) getState() *internal.State {
- return internal.GetTracesState(internal.Traces(ms))
+ return internal.GetTracesState(internal.TracesWrapper(ms))
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go
index 1c1cab317..a6f0dd36f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go
@@ -6,7 +6,6 @@ package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
"slices"
- "go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
@@ -18,7 +17,7 @@ type JSONMarshaler struct{}
func (*JSONMarshaler) MarshalTraces(td Traces) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
- internal.MarshalJSONOrigExportTraceServiceRequest(td.getOrig(), dest)
+ td.getOrig().MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
@@ -33,7 +32,7 @@ func (*JSONUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
td := NewTraces()
- internal.UnmarshalJSONOrigExportTraceServiceRequest(td.getOrig(), iter)
+ td.getOrig().UnmarshalJSON(iter)
if iter.Error() != nil {
return Traces{}, iter.Error()
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
index c1a01f1e2..c9a02b289 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
@@ -3,64 +3,38 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
-import (
- "go.opentelemetry.io/collector/pdata/internal"
-)
-
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalTraces(td Traces) ([]byte, error) {
- if !internal.UseCustomProtoEncoding.IsEnabled() {
- return td.getOrig().Marshal()
- }
- size := internal.SizeProtoOrigExportTraceServiceRequest(td.getOrig())
+ size := td.getOrig().SizeProto()
buf := make([]byte, size)
- _ = internal.MarshalProtoOrigExportTraceServiceRequest(td.getOrig(), buf)
+ _ = td.getOrig().MarshalProto(buf)
return buf, nil
}
func (e *ProtoMarshaler) TracesSize(td Traces) int {
- if !internal.UseCustomProtoEncoding.IsEnabled() {
- return td.getOrig().Size()
- }
- return internal.SizeProtoOrigExportTraceServiceRequest(td.getOrig())
+ return td.getOrig().SizeProto()
}
func (e *ProtoMarshaler) ResourceSpansSize(td ResourceSpans) int {
- if !internal.UseCustomProtoEncoding.IsEnabled() {
- return td.orig.Size()
- }
- return internal.SizeProtoOrigResourceSpans(td.orig)
+ return td.orig.SizeProto()
}
func (e *ProtoMarshaler) ScopeSpansSize(td ScopeSpans) int {
- if !internal.UseCustomProtoEncoding.IsEnabled() {
- return td.orig.Size()
- }
- return internal.SizeProtoOrigScopeSpans(td.orig)
+ return td.orig.SizeProto()
}
func (e *ProtoMarshaler) SpanSize(td Span) int {
- if !internal.UseCustomProtoEncoding.IsEnabled() {
- return td.orig.Size()
- }
- return internal.SizeProtoOrigSpan(td.orig)
+ return td.orig.SizeProto()
}
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
td := NewTraces()
- if !internal.UseCustomProtoEncoding.IsEnabled() {
- err := td.getOrig().Unmarshal(buf)
- if err != nil {
- return Traces{}, err
- }
- return td, nil
- }
- err := internal.UnmarshalProtoOrigExportTraceServiceRequest(td.getOrig(), buf)
+ err := td.getOrig().UnmarshalProto(buf)
if err != nil {
return Traces{}, err
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
index 561d82cff..bb1702ffe 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
@@ -4,7 +4,7 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// SpanKind is the type of span. Can be used to specify additional relationships between spans
@@ -13,25 +13,25 @@ type SpanKind int32
const (
// SpanKindUnspecified represents that the SpanKind is unspecified, it MUST NOT be used.
- SpanKindUnspecified = SpanKind(otlptrace.Span_SPAN_KIND_UNSPECIFIED)
+ SpanKindUnspecified = SpanKind(internal.SpanKind_SPAN_KIND_UNSPECIFIED)
// SpanKindInternal indicates that the span represents an internal operation within an application,
// as opposed to an operation happening at the boundaries. Default value.
- SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL)
+ SpanKindInternal = SpanKind(internal.SpanKind_SPAN_KIND_INTERNAL)
// SpanKindServer indicates that the span covers server-side handling of an RPC or other
// remote network request.
- SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER)
+ SpanKindServer = SpanKind(internal.SpanKind_SPAN_KIND_SERVER)
// SpanKindClient indicates that the span describes a request to some remote service.
- SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT)
+ SpanKindClient = SpanKind(internal.SpanKind_SPAN_KIND_CLIENT)
// SpanKindProducer indicates that the span describes a producer sending a message to a broker.
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
// between producer and consumer spans.
// A PRODUCER span ends when the message was accepted by the broker while the logical processing of
// the message might span a much longer time.
- SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER)
+ SpanKindProducer = SpanKind(internal.SpanKind_SPAN_KIND_PRODUCER)
// SpanKindConsumer indicates that the span describes consumer receiving a message from a broker.
// Like the PRODUCER kind, there is often no direct critical path latency relationship between
// producer and consumer spans.
- SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER)
+ SpanKindConsumer = SpanKind(internal.SpanKind_SPAN_KIND_CONSUMER)
)
// String returns the string representation of the SpanKind.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go
index 18a21f56b..d1da46436 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go
@@ -4,7 +4,7 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// StatusCode mirrors the codes defined at
@@ -12,9 +12,9 @@ import (
type StatusCode int32
const (
- StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET)
- StatusCodeOk = StatusCode(otlptrace.Status_STATUS_CODE_OK)
- StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR)
+ StatusCodeUnset = StatusCode(internal.StatusCode_STATUS_CODE_UNSET)
+ StatusCodeOk = StatusCode(internal.StatusCode_STATUS_CODE_OK)
+ StatusCodeError = StatusCode(internal.StatusCode_STATUS_CODE_ERROR)
)
// String returns the string representation of the StatusCode.
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
index 8cf5d8112..956795524 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
@@ -38,6 +38,9 @@ type chacha20poly1305 struct {
// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key.
func New(key []byte) (cipher.AEAD, error) {
+ if fips140Enforced() {
+ return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
+ }
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go
new file mode 100644
index 000000000..9b9d5643e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go
@@ -0,0 +1,9 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package chacha20poly1305
+
+func fips140Enforced() bool { return false }
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go
new file mode 100644
index 000000000..f71089c48
--- /dev/null
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go
@@ -0,0 +1,11 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package chacha20poly1305
+
+import "crypto/fips140"
+
+func fips140Enforced() bool { return fips140.Enforced() }
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
index 1cebfe946..b4299b718 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
@@ -22,6 +22,9 @@ type xchacha20poly1305 struct {
// preferred when nonce uniqueness cannot be trivially ensured, or whenever
// nonces are randomly generated.
func NewX(key []byte) (cipher.AEAD, error) {
+ if fips140Enforced() {
+ return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
+ }
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
index cb4cadc32..dfbfc1eb3 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
@@ -37,6 +37,15 @@ type priorityWriteSchedulerRFC9218 struct {
// incremental streams or not, when urgency is the same in a given Pop()
// call.
prioritizeIncremental bool
+
+ // priorityUpdateBuf is used to buffer the most recent PRIORITY_UPDATE we
+ // receive per https://www.rfc-editor.org/rfc/rfc9218.html#name-the-priority_update-frame.
+ priorityUpdateBuf struct {
+ // streamID being 0 means that the buffer is empty. This is a safe
+ // assumption as PRIORITY_UPDATE for stream 0 is a PROTOCOL_ERROR.
+ streamID uint32
+ priority PriorityParam
+ }
}
func newPriorityWriteSchedulerRFC9218() WriteScheduler {
@@ -50,6 +59,10 @@ func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStr
if ws.streams[streamID].location != nil {
panic(fmt.Errorf("stream %d already opened", streamID))
}
+ if streamID == ws.priorityUpdateBuf.streamID {
+ ws.priorityUpdateBuf.streamID = 0
+ opt.priority = ws.priorityUpdateBuf.priority
+ }
q := ws.queuePool.get()
ws.streams[streamID] = streamMetadata{
location: q,
@@ -95,6 +108,8 @@ func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority
metadata := ws.streams[streamID]
q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
if q == nil {
+ ws.priorityUpdateBuf.streamID = streamID
+ ws.priorityUpdateBuf.priority = priority
return
}
diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go
new file mode 100644
index 000000000..f8b779ea2
--- /dev/null
+++ b/vendor/golang.org/x/net/netutil/listen.go
@@ -0,0 +1,87 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netutil provides network utility functions, complementing the more
+// common ones in the net package.
+package netutil // import "golang.org/x/net/netutil"
+
+import (
+ "net"
+ "sync"
+)
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+ return &limitListener{
+ Listener: l,
+ sem: make(chan struct{}, n),
+ done: make(chan struct{}),
+ }
+}
+
+type limitListener struct {
+ net.Listener
+ sem chan struct{}
+ closeOnce sync.Once // ensures the done chan is only closed once
+ done chan struct{} // no values sent; closed when Close is called
+}
+
+// acquire acquires the limiting semaphore. Returns true if successfully
+// acquired, false if the listener is closed and the semaphore is not
+// acquired.
+func (l *limitListener) acquire() bool {
+ select {
+ case <-l.done:
+ return false
+ case l.sem <- struct{}{}:
+ return true
+ }
+}
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+ if !l.acquire() {
+ // If the semaphore isn't acquired because the listener was closed, expect
+ // that this call to accept won't block, but immediately return an error.
+ // If it instead returns a spurious connection (due to a bug in the
+ // Listener, such as https://golang.org/issue/50216), we immediately close
+ // it and try again. Some buggy Listener implementations (like the one in
+ // the aforementioned issue) seem to assume that Accept will be called to
+ // completion, and may otherwise fail to clean up the client end of pending
+ // connections.
+ for {
+ c, err := l.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ c.Close()
+ }
+ }
+
+ c, err := l.Listener.Accept()
+ if err != nil {
+ l.release()
+ return nil, err
+ }
+ return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+func (l *limitListener) Close() error {
+ err := l.Listener.Close()
+ l.closeOnce.Do(func() { close(l.done) })
+ return err
+}
+
+type limitListenerConn struct {
+ net.Conn
+ releaseOnce sync.Once
+ release func()
+}
+
+func (l *limitListenerConn) Close() error {
+ err := l.Conn.Close()
+ l.releaseOnce.Do(l.release)
+ return err
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index 1e642f330..f5723d4f7 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -64,6 +64,80 @@ func initOptions() {
func archInit() {
+ // From internal/cpu
+ const (
+ // eax bits
+ cpuid_AVXVNNI = 1 << 4
+
+ // ecx bits
+ cpuid_SSE3 = 1 << 0
+ cpuid_PCLMULQDQ = 1 << 1
+ cpuid_AVX512VBMI = 1 << 1
+ cpuid_AVX512VBMI2 = 1 << 6
+ cpuid_SSSE3 = 1 << 9
+ cpuid_AVX512GFNI = 1 << 8
+ cpuid_AVX512VAES = 1 << 9
+ cpuid_AVX512VNNI = 1 << 11
+ cpuid_AVX512BITALG = 1 << 12
+ cpuid_FMA = 1 << 12
+ cpuid_AVX512VPOPCNTDQ = 1 << 14
+ cpuid_SSE41 = 1 << 19
+ cpuid_SSE42 = 1 << 20
+ cpuid_POPCNT = 1 << 23
+ cpuid_AES = 1 << 25
+ cpuid_OSXSAVE = 1 << 27
+ cpuid_AVX = 1 << 28
+
+ // "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0
+ cpuid_BMI1 = 1 << 3
+ cpuid_AVX2 = 1 << 5
+ cpuid_BMI2 = 1 << 8
+ cpuid_ERMS = 1 << 9
+ cpuid_AVX512F = 1 << 16
+ cpuid_AVX512DQ = 1 << 17
+ cpuid_ADX = 1 << 19
+ cpuid_AVX512CD = 1 << 28
+ cpuid_SHA = 1 << 29
+ cpuid_AVX512BW = 1 << 30
+ cpuid_AVX512VL = 1 << 31
+
+ // "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0
+ cpuid_AVX512_VBMI = 1 << 1
+ cpuid_AVX512_VBMI2 = 1 << 6
+ cpuid_GFNI = 1 << 8
+ cpuid_AVX512VPCLMULQDQ = 1 << 10
+ cpuid_AVX512_BITALG = 1 << 12
+
+ // edx bits
+ cpuid_FSRM = 1 << 4
+ // edx bits for CPUID 0x80000001
+ cpuid_RDTSCP = 1 << 27
+ )
+ // Additional constants not in internal/cpu
+ const (
+ // eax=1: edx
+ cpuid_SSE2 = 1 << 26
+ // eax=1: ecx
+ cpuid_CX16 = 1 << 13
+ cpuid_RDRAND = 1 << 30
+ // eax=7,ecx=0: ebx
+ cpuid_RDSEED = 1 << 18
+ cpuid_AVX512IFMA = 1 << 21
+ cpuid_AVX512PF = 1 << 26
+ cpuid_AVX512ER = 1 << 27
+ // eax=7,ecx=0: edx
+ cpuid_AVX5124VNNIW = 1 << 2
+ cpuid_AVX5124FMAPS = 1 << 3
+ cpuid_AMXBF16 = 1 << 22
+ cpuid_AMXTile = 1 << 24
+ cpuid_AMXInt8 = 1 << 25
+ // eax=7,ecx=1: eax
+ cpuid_AVX512BF16 = 1 << 5
+ cpuid_AVXIFMA = 1 << 23
+ // eax=7,ecx=1: edx
+ cpuid_AVXVNNIInt8 = 1 << 4
+ )
+
Initialized = true
maxID, _, _, _ := cpuid(0, 0)
@@ -73,90 +147,90 @@ func archInit() {
}
_, _, ecx1, edx1 := cpuid(1, 0)
- X86.HasSSE2 = isSet(26, edx1)
-
- X86.HasSSE3 = isSet(0, ecx1)
- X86.HasPCLMULQDQ = isSet(1, ecx1)
- X86.HasSSSE3 = isSet(9, ecx1)
- X86.HasFMA = isSet(12, ecx1)
- X86.HasCX16 = isSet(13, ecx1)
- X86.HasSSE41 = isSet(19, ecx1)
- X86.HasSSE42 = isSet(20, ecx1)
- X86.HasPOPCNT = isSet(23, ecx1)
- X86.HasAES = isSet(25, ecx1)
- X86.HasOSXSAVE = isSet(27, ecx1)
- X86.HasRDRAND = isSet(30, ecx1)
+ X86.HasSSE2 = isSet(edx1, cpuid_SSE2)
+
+ X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
+ X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
+ X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
+ X86.HasFMA = isSet(ecx1, cpuid_FMA)
+ X86.HasCX16 = isSet(ecx1, cpuid_CX16)
+ X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
+ X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
+ X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
+ X86.HasAES = isSet(ecx1, cpuid_AES)
+ X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
+ X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND)
var osSupportsAVX, osSupportsAVX512 bool
// For XGETBV, OSXSAVE bit is required and sufficient.
if X86.HasOSXSAVE {
eax, _ := xgetbv()
// Check if XMM and YMM registers have OS support.
- osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+ osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
if runtime.GOOS == "darwin" {
// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else {
// Check if OPMASK and ZMM registers have OS support.
- osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+ osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7)
}
}
- X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+ X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
if maxID < 7 {
return
}
eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
- X86.HasBMI1 = isSet(3, ebx7)
- X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
- X86.HasBMI2 = isSet(8, ebx7)
- X86.HasERMS = isSet(9, ebx7)
- X86.HasRDSEED = isSet(18, ebx7)
- X86.HasADX = isSet(19, ebx7)
-
- X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
+ X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
+ X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
+ X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
+ X86.HasERMS = isSet(ebx7, cpuid_ERMS)
+ X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED)
+ X86.HasADX = isSet(ebx7, cpuid_ADX)
+
+ X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
if X86.HasAVX512 {
X86.HasAVX512F = true
- X86.HasAVX512CD = isSet(28, ebx7)
- X86.HasAVX512ER = isSet(27, ebx7)
- X86.HasAVX512PF = isSet(26, ebx7)
- X86.HasAVX512VL = isSet(31, ebx7)
- X86.HasAVX512BW = isSet(30, ebx7)
- X86.HasAVX512DQ = isSet(17, ebx7)
- X86.HasAVX512IFMA = isSet(21, ebx7)
- X86.HasAVX512VBMI = isSet(1, ecx7)
- X86.HasAVX5124VNNIW = isSet(2, edx7)
- X86.HasAVX5124FMAPS = isSet(3, edx7)
- X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
- X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
- X86.HasAVX512VNNI = isSet(11, ecx7)
- X86.HasAVX512GFNI = isSet(8, ecx7)
- X86.HasAVX512VAES = isSet(9, ecx7)
- X86.HasAVX512VBMI2 = isSet(6, ecx7)
- X86.HasAVX512BITALG = isSet(12, ecx7)
+ X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD)
+ X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER)
+ X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF)
+ X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
+ X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
+ X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ)
+ X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA)
+ X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI)
+ X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW)
+ X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS)
+ X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ)
+ X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ)
+ X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI)
+ X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI)
+ X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES)
+ X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2)
+ X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG)
}
- X86.HasAMXTile = isSet(24, edx7)
- X86.HasAMXInt8 = isSet(25, edx7)
- X86.HasAMXBF16 = isSet(22, edx7)
+ X86.HasAMXTile = isSet(edx7, cpuid_AMXTile)
+ X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8)
+ X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16)
// These features depend on the second level of extended features.
if eax7 >= 1 {
eax71, _, _, edx71 := cpuid(7, 1)
if X86.HasAVX512 {
- X86.HasAVX512BF16 = isSet(5, eax71)
+ X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16)
}
if X86.HasAVX {
- X86.HasAVXIFMA = isSet(23, eax71)
- X86.HasAVXVNNI = isSet(4, eax71)
- X86.HasAVXVNNIInt8 = isSet(4, edx71)
+ X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA)
+ X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI)
+ X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8)
}
}
}
-func isSet(bitpos uint, value uint32) bool {
- return value&(1<= 4 && b[0] == keyEscape && b[1] == '[' && b[2] == '3' && b[3] == '~' {
+ return keyDelete, b[4:]
+ }
+
if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
switch b[5] {
case 'C':
@@ -590,7 +598,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
}
t.line = t.line[:t.pos]
t.moveCursorToPos(t.pos)
- case keyCtrlD:
+ case keyCtrlD, keyDelete:
// Erase the character under the current position.
// The EOF case when the line is empty is handled in
// readLine().
@@ -600,6 +608,24 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
}
case keyCtrlU:
t.eraseNPreviousChars(t.pos)
+ case keyTranspose:
+ // This transposes the two characters around the cursor and advances the cursor. Best-effort.
+ if len(t.line) < 2 || t.pos < 1 {
+ return
+ }
+ swap := t.pos
+ if swap == len(t.line) {
+ swap-- // special: at end of line, swap previous two chars
+ }
+ t.line[swap-1], t.line[swap] = t.line[swap], t.line[swap-1]
+ if t.pos < len(t.line) {
+ t.pos++
+ }
+ if t.echo {
+ t.moveCursorToPos(swap - 1)
+ t.writeLine(t.line[swap-1:])
+ t.moveCursorToPos(t.pos)
+ }
case keyClearScreen:
// Erases the screen and moves the cursor to the home position.
t.queue([]rune("\x1b[2J\x1b[H"))
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 060ab08ef..ff607389d 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -1027,11 +1027,15 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
func (ld *loader) loadPackage(lpkg *loaderPackage) {
if lpkg.PkgPath == "unsafe" {
- // Fill in the blanks to avoid surprises.
+ // To avoid surprises, fill in the blanks consistent
+ // with other packages. (For example, some analyzers
+ // assert that each needed types.Info map is non-nil
+ // even when there is no syntax that would cause them
+ // to consult the map.)
lpkg.Types = types.Unsafe
lpkg.Fset = ld.Fset
lpkg.Syntax = []*ast.File{}
- lpkg.TypesInfo = new(types.Info)
+ lpkg.TypesInfo = ld.newTypesInfo()
lpkg.TypesSizes = ld.sizes
return
}
@@ -1180,20 +1184,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
return
}
- // Populate TypesInfo only if needed, as it
- // causes the type checker to work much harder.
- if ld.Config.Mode&NeedTypesInfo != 0 {
- lpkg.TypesInfo = &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Instances: make(map[*ast.Ident]types.Instance),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- FileVersions: make(map[*ast.File]string),
- }
- }
+ lpkg.TypesInfo = ld.newTypesInfo()
lpkg.TypesSizes = ld.sizes
importer := importerFunc(func(path string) (*types.Package, error) {
@@ -1307,6 +1298,24 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
lpkg.IllTyped = illTyped
}
+func (ld *loader) newTypesInfo() *types.Info {
+ // Populate TypesInfo only if needed, as it
+ // causes the type checker to work much harder.
+ if ld.Config.Mode&NeedTypesInfo == 0 {
+ return nil
+ }
+ return &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Instances: make(map[*ast.Ident]types.Instance),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ FileVersions: make(map[*ast.File]string),
+ }
+}
+
// An importFunc is an implementation of the single-method
// types.Importer interface based on a function value.
type importerFunc func(path string) (*types.Package, error)
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
index 5f10f56cb..3d24a8c63 100644
--- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -12,6 +12,7 @@ import (
// Callee returns the named target of a function call, if any:
// a function, method, builtin, or variable.
+// It returns nil for a T(x) conversion.
//
// Functions and methods may potentially have type parameters.
//
diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go
index 05f3a9a57..16ae6bb02 100644
--- a/vendor/golang.org/x/tools/internal/event/core/export.go
+++ b/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -8,7 +8,6 @@ import (
"context"
"sync/atomic"
"time"
- "unsafe"
"golang.org/x/tools/internal/event/label"
)
@@ -17,23 +16,21 @@ import (
// It may return a modified context and event.
type Exporter func(context.Context, Event, label.Map) context.Context
-var (
- exporter unsafe.Pointer
-)
+var exporter atomic.Pointer[Exporter]
// SetExporter sets the global exporter function that handles all events.
// The exporter is called synchronously from the event call site, so it should
// return quickly so as not to hold up user code.
func SetExporter(e Exporter) {
- p := unsafe.Pointer(&e)
if e == nil {
// &e is always valid, and so p is always valid, but for the early abort
// of ProcessEvent to be efficient it needs to make the nil check on the
// pointer without having to dereference it, so we make the nil function
// also a nil pointer
- p = nil
+ exporter.Store(nil)
+ } else {
+ exporter.Store(&e)
}
- atomic.StorePointer(&exporter, p)
}
// deliver is called to deliver an event to the supplied exporter.
@@ -48,7 +45,7 @@ func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
// Export is called to deliver an event to the global exporter if set.
func Export(ctx context.Context, ev Event) context.Context {
// get the global exporter and abort early if there is not one
- exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ exporterPtr := exporter.Load()
if exporterPtr == nil {
return ctx
}
@@ -61,7 +58,7 @@ func Export(ctx context.Context, ev Event) context.Context {
// It will fill in the time.
func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
// get the global exporter and abort early if there is not one
- exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ exporterPtr := exporter.Load()
if exporterPtr == nil {
return ctx, func() {}
}
diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go
index 92a391057..c37584af9 100644
--- a/vendor/golang.org/x/tools/internal/event/label/label.go
+++ b/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -7,7 +7,6 @@ package label
import (
"fmt"
"io"
- "reflect"
"slices"
"unsafe"
)
@@ -103,11 +102,10 @@ type stringptr unsafe.Pointer
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfString(k Key, v string) Label {
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
return Label{
key: k,
- packed: uint64(hdr.Len),
- untyped: stringptr(hdr.Data),
+ packed: uint64(len(v)),
+ untyped: stringptr(unsafe.StringData(v)),
}
}
@@ -116,11 +114,7 @@ func OfString(k Key, v string) Label {
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackString() string {
- var v string
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
- hdr.Data = uintptr(t.untyped.(stringptr))
- hdr.Len = int(t.packed)
- return v
+ return unsafe.String((*byte)(t.untyped.(stringptr)), int(t.packed))
}
// Valid returns true if the Label is a valid one (it has a key).
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
index 581784da4..f7b9c1286 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/deps.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -12,360 +12,364 @@ type pkginfo struct {
}
var deps = [...]pkginfo{
- {"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
- {"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
- {"bufio", "\x03n\x84\x01D\x14"},
- {"bytes", "q*Z\x03\fG\x02\x02"},
+ {"archive/tar", "\x03p\x03F=\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
+ {"archive/zip", "\x02\x04f\a\x03\x13\x021=\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
+ {"bufio", "\x03p\x86\x01D\x14"},
+ {"bytes", "s+[\x03\fG\x02\x02"},
{"cmp", ""},
- {"compress/bzip2", "\x02\x02\xf1\x01A"},
- {"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"},
- {"compress/gzip", "\x02\x04d\a\x03\x14mT"},
- {"compress/lzw", "\x02o\x03\x81\x01"},
- {"compress/zlib", "\x02\x04d\a\x03\x12\x01n"},
- {"container/heap", "\xb7\x02"},
+ {"compress/bzip2", "\x02\x02\xf5\x01A"},
+ {"compress/flate", "\x02q\x03\x83\x01\f\x033\x01\x03"},
+ {"compress/gzip", "\x02\x04f\a\x03\x15nT"},
+ {"compress/lzw", "\x02q\x03\x83\x01"},
+ {"compress/zlib", "\x02\x04f\a\x03\x13\x01o"},
+ {"container/heap", "\xbb\x02"},
{"container/list", ""},
{"container/ring", ""},
- {"context", "q[o\x01\r"},
- {"crypto", "\x86\x01oC"},
- {"crypto/aes", "\x10\n\t\x95\x02"},
- {"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"},
- {"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"},
- {"crypto/dsa", "D\x04)\x84\x01\r"},
- {"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"},
- {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"},
- {"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"},
- {"crypto/elliptic", "2?\x84\x01\r9"},
+ {"context", "s\\p\x01\r"},
+ {"crypto", "\x89\x01pC"},
+ {"crypto/aes", "\x10\n\t\x99\x02"},
+ {"crypto/cipher", "\x03 \x01\x01 \x12\x1c,Z"},
+ {"crypto/des", "\x10\x15 .,\x9d\x01\x03"},
+ {"crypto/dsa", "E\x04*\x86\x01\r"},
+ {"crypto/ecdh", "\x03\v\f\x10\x04\x17\x04\x0e\x1c\x86\x01"},
+ {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\b\v\x06\x01\x04\r\x01\x1c\x86\x01\r\x05K\x01"},
+ {"crypto/ed25519", "\x0e\x1e\x12\a\v\a\x1c\x86\x01C"},
+ {"crypto/elliptic", "3@\x86\x01\r9"},
{"crypto/fips140", "\"\x05"},
- {"crypto/hkdf", "/\x14\x01-\x15"},
- {"crypto/hmac", "\x1a\x16\x13\x01\x111"},
- {"crypto/internal/boring", "\x0e\x02\ri"},
- {"crypto/internal/boring/bbig", "\x1a\xe8\x01M"},
- {"crypto/internal/boring/bcache", "\xbc\x02\x13"},
+ {"crypto/hkdf", "/\x15\x01.\x16"},
+ {"crypto/hmac", "\x1a\x16\x14\x01\x122"},
+ {"crypto/internal/boring", "\x0e\x02\rl"},
+ {"crypto/internal/boring/bbig", "\x1a\xec\x01M"},
+ {"crypto/internal/boring/bcache", "\xc0\x02\x13"},
{"crypto/internal/boring/sig", ""},
{"crypto/internal/constanttime", ""},
- {"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
- {"crypto/internal/entropy", "I"},
- {"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"},
- {"crypto/internal/fips140", "A0\xbd\x01\v\x16"},
- {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"},
- {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"},
- {"crypto/internal/fips140/alias", "\xcf\x02"},
- {"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"},
- {"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"},
- {"crypto/internal/fips140/check/checktest", "'\x87\x02!"},
- {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"},
- {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"},
- {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"},
- {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"},
- {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"},
- {"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"},
- {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"},
- {"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"},
- {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"},
- {"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"},
- {"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"},
- {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"},
- {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"},
- {"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"},
- {"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"},
- {"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"},
- {"crypto/internal/fips140/ssh", "'_"},
- {"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"},
- {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"},
- {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"},
- {"crypto/internal/fips140cache", "\xae\x02\r&"},
+ {"crypto/internal/cryptotest", "\x03\r\n\b&\x0f\x19\x06\x13\x12 \x04\x06\t\x19\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
+ {"crypto/internal/entropy", "J"},
+ {"crypto/internal/entropy/v1.0.0", "C0\x95\x018\x13"},
+ {"crypto/internal/fips140", "B1\xbf\x01\v\x16"},
+ {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x14\x05\x01\x01\x06+\x95\x014"},
+ {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x12\x05\x01\a+\x92\x01"},
+ {"crypto/internal/fips140/alias", "\xd3\x02"},
+ {"crypto/internal/fips140/bigmod", "'\x19\x01\a+\x95\x01"},
+ {"crypto/internal/fips140/check", "\"\x0e\a\t\x02\xb7\x01Z"},
+ {"crypto/internal/fips140/check/checktest", "'\x8b\x02!"},
+ {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x14\x05\t\x01)\x86\x01\x0f7\x01"},
+ {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\n\r3\x86\x01\x0f7"},
+ {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x03\x06:\x16pF"},
+ {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\f:\xc9\x01\x03"},
+ {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x123\x95\x017"},
+ {"crypto/internal/fips140/edwards25519/field", "'\x14\x053\x95\x01"},
+ {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\a<\x16"},
+ {"crypto/internal/fips140/hmac", "\x03\x1f\x15\x01\x01:\x16"},
+ {"crypto/internal/fips140/mldsa", "\x03\x1b\x04\x05\x02\x0e\x01\x03\x053\x95\x017"},
+ {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0f\x03\x053\xcc\x01"},
+ {"crypto/internal/fips140/nistec", "\x1e\t\r\f3\x95\x01*\r\x14"},
+ {"crypto/internal/fips140/nistec/fiat", "'\x148\x95\x01"},
+ {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\a<\x16"},
+ {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\x0e\x01\x01\x028\x16pF"},
+ {"crypto/internal/fips140/sha256", "\x03\x1f\x1e\x01\a+\x16\x7f"},
+ {"crypto/internal/fips140/sha3", "\x03\x1f\x19\x05\x012\x95\x01K"},
+ {"crypto/internal/fips140/sha512", "\x03\x1f\x1e\x01\a+\x16\x7f"},
+ {"crypto/internal/fips140/ssh", "'b"},
+ {"crypto/internal/fips140/subtle", "\x1e\a\x1b\xc8\x01"},
+ {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\a\x02:\x16"},
+ {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\b\t3\x16"},
+ {"crypto/internal/fips140cache", "\xb2\x02\r&"},
{"crypto/internal/fips140deps", ""},
- {"crypto/internal/fips140deps/byteorder", "\x9c\x01"},
- {"crypto/internal/fips140deps/cpu", "\xb1\x01\a"},
- {"crypto/internal/fips140deps/godebug", "\xb9\x01"},
- {"crypto/internal/fips140deps/time", "\xc9\x02"},
- {"crypto/internal/fips140hash", "7\x1c3\xc9\x01"},
- {"crypto/internal/fips140only", ")\r\x01\x01N3<"},
+ {"crypto/internal/fips140deps/byteorder", "\x9f\x01"},
+ {"crypto/internal/fips140deps/cpu", "\xb4\x01\a"},
+ {"crypto/internal/fips140deps/godebug", "\xbc\x01"},
+ {"crypto/internal/fips140deps/time", "\xcd\x02"},
+ {"crypto/internal/fips140hash", "8\x1d4\xca\x01"},
+ {"crypto/internal/fips140only", ")\x0e\x01\x01P3="},
{"crypto/internal/fips140test", ""},
- {"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"},
- {"crypto/internal/impl", "\xb9\x02"},
- {"crypto/internal/randutil", "\xf5\x01\x12"},
- {"crypto/internal/sysrand", "qo! \r\r\x01\x01\f\x06"},
- {"crypto/internal/sysrand/internal/seccomp", "q"},
- {"crypto/md5", "\x0e6-\x15\x16h"},
- {"crypto/mlkem", "1"},
- {"crypto/pbkdf2", "4\x0f\x01-\x15"},
- {"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"},
- {"crypto/rc4", "%\x1f-\xc7\x01"},
- {"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"},
- {"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"},
- {"crypto/sha256", "\x0e\f\x1cP"},
- {"crypto/sha3", "\x0e)O\xc9\x01"},
- {"crypto/sha512", "\x0e\f\x1eN"},
- {"crypto/subtle", "\x1e\x1c\x9c\x01X"},
- {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
- {"crypto/tls/internal/fips140tls", "\x17\xa5\x02"},
- {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
- {"crypto/x509/pkix", "g\x06\a\x8e\x01G"},
- {"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
- {"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"},
- {"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"},
- {"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"},
- {"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"},
- {"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"},
- {"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"},
- {"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"},
- {"debug/plan9obj", "j\a\x03e\x1c,"},
- {"embed", "q*A\x19\x01S"},
+ {"crypto/internal/hpke", "\x03\v\x01\x01\x03\x055\x03\x04\x01\x01\x16\a\x03\x13\xcc\x01"},
+ {"crypto/internal/impl", "\xbd\x02"},
+ {"crypto/internal/randutil", "\xf9\x01\x12"},
+ {"crypto/internal/sysrand", "sq! \r\r\x01\x01\f\x06"},
+ {"crypto/internal/sysrand/internal/seccomp", "s"},
+ {"crypto/md5", "\x0e7.\x16\x16i"},
+ {"crypto/mlkem", "\x0e$"},
+ {"crypto/mlkem/mlkemtest", "2\x1b&"},
+ {"crypto/pbkdf2", "5\x0f\x01.\x16"},
+ {"crypto/rand", "\x1a\b\a\x1c\x04\x01)\x86\x01\rM"},
+ {"crypto/rc4", "% .\xc9\x01"},
+ {"crypto/rsa", "\x0e\f\x01\v\x10\x0e\x01\x04\a\a\x1c\x03\x133=\f\x01"},
+ {"crypto/sha1", "\x0e\f+\x03+\x16\x16\x15T"},
+ {"crypto/sha256", "\x0e\f\x1dR"},
+ {"crypto/sha3", "\x0e*Q\xca\x01"},
+ {"crypto/sha512", "\x0e\f\x1fP"},
+ {"crypto/subtle", "\x1e\x1d\x9f\x01X"},
+ {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\x0e\n\x01\n\x05\x04\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x15\b=\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
+ {"crypto/tls/internal/fips140tls", "\x17\xa9\x02"},
+ {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x016\x06\x01\x01\x02\x05\x0e\x06\x02\x02\x03F\x03:\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\a\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
+ {"crypto/x509/pkix", "i\x06\a\x90\x01G"},
+ {"database/sql", "\x03\nP\x16\x03\x83\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
+ {"database/sql/driver", "\rf\x03\xb7\x01\x0f\x11"},
+ {"debug/buildinfo", "\x03]\x02\x01\x01\b\a\x03g\x1a\x02\x01+\x0f\x1f"},
+ {"debug/dwarf", "\x03i\a\x03\x83\x011\x11\x01\x01"},
+ {"debug/elf", "\x03\x06V\r\a\x03g\x1b\x01\f \x17\x01\x16"},
+ {"debug/gosym", "\x03i\n\xc5\x01\x01\x01\x02"},
+ {"debug/macho", "\x03\x06V\r\ng\x1c,\x17\x01"},
+ {"debug/pe", "\x03\x06V\r\a\x03g\x1c,\x17\x01\x16"},
+ {"debug/plan9obj", "l\a\x03g\x1c,"},
+ {"embed", "s+B\x19\x01S"},
{"embed/internal/embedtest", ""},
{"encoding", ""},
- {"encoding/ascii85", "\xf5\x01C"},
- {"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"},
- {"encoding/base32", "\xf5\x01A\x02"},
- {"encoding/base64", "\x9c\x01YA\x02"},
- {"encoding/binary", "q\x84\x01\f(\r\x05"},
- {"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"},
- {"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
- {"encoding/hex", "q\x03\x81\x01A\x03"},
- {"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
- {"encoding/pem", "\x03f\b\x84\x01A\x03"},
- {"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"},
- {"errors", "\xcc\x01\x83\x01"},
- {"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"},
- {"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"},
- {"fmt", "qE&\x19\f \b\r\x02\x03\x12"},
- {"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"},
- {"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
- {"go/build/constraint", "q\xc7\x01\x01\x12\x02"},
- {"go/constant", "t\x0f~\x01\x024\x01\x02\x12"},
- {"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"},
- {"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"},
- {"go/format", "\x03q\x01\v\x01\x02rD"},
- {"go/importer", "v\a\x01\x01\x04\x01q9"},
- {"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"},
- {"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"},
- {"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"},
- {"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"},
- {"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"},
- {"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"},
- {"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"},
- {"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
- {"go/version", "\xbe\x01{"},
- {"hash", "\xf5\x01"},
- {"hash/adler32", "q\x15\x16"},
- {"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"},
- {"hash/crc64", "q\x15\x16\x9f\x01"},
- {"hash/fnv", "q\x15\x16h"},
- {"hash/maphash", "\x86\x01\x11<|"},
- {"html", "\xb9\x02\x02\x12"},
- {"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
- {"image", "\x02o\x1ef\x0f4\x03\x01"},
+ {"encoding/ascii85", "\xf9\x01C"},
+ {"encoding/asn1", "\x03p\x03g(\x01'\r\x02\x01\x10\x03\x01"},
+ {"encoding/base32", "\xf9\x01A\x02"},
+ {"encoding/base64", "\x9f\x01ZA\x02"},
+ {"encoding/binary", "s\x86\x01\f(\r\x05"},
+ {"encoding/csv", "\x02\x01p\x03\x83\x01D\x12\x02"},
+ {"encoding/gob", "\x02e\x05\a\x03g\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
+ {"encoding/hex", "s\x03\x83\x01A\x03"},
+ {"encoding/json", "\x03\x01c\x04\b\x03\x83\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
+ {"encoding/pem", "\x03h\b\x86\x01A\x03"},
+ {"encoding/xml", "\x02\x01d\f\x03\x83\x014\x05\n\x01\x02\x10\x02"},
+ {"errors", "\xcf\x01\x84\x01"},
+ {"expvar", "pLA\b\v\x15\r\b\x02\x03\x01\x11"},
+ {"flag", "g\f\x03\x83\x01,\b\x05\b\x02\x01\x10"},
+ {"fmt", "sF'\x19\f \b\r\x02\x03\x12"},
+ {"go/ast", "\x03\x01r\x0f\x01s\x03)\b\r\x02\x01\x12\x02"},
+ {"go/build", "\x02\x01p\x03\x01\x02\x02\b\x02\x01\x17\x1f\x04\x02\b\x1c\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
+ {"go/build/constraint", "s\xc9\x01\x01\x12\x02"},
+ {"go/constant", "v\x10\x7f\x01\x024\x01\x02\x12"},
+ {"go/doc", "\x04r\x01\x05\n=61\x10\x02\x01\x12\x02"},
+ {"go/doc/comment", "\x03s\xc4\x01\x01\x01\x01\x12\x02"},
+ {"go/format", "\x03s\x01\f\x01\x02sD"},
+ {"go/importer", "x\a\x01\x02\x04\x01r9"},
+ {"go/internal/gccgoimporter", "\x02\x01]\x13\x03\x04\f\x01p\x02,\x01\x05\x11\x01\f\b"},
+ {"go/internal/gcimporter", "\x02t\x10\x010\x05\r0,\x15\x03\x02"},
+ {"go/internal/scannerhooks", "\x86\x01"},
+ {"go/internal/srcimporter", "v\x01\x01\v\x03\x01r,\x01\x05\x12\x02\x14"},
+ {"go/parser", "\x03p\x03\x01\x02\b\x04\x01s\x01+\x06\x12"},
+ {"go/printer", "v\x01\x02\x03\ns\f \x15\x02\x01\x02\v\x05\x02"},
+ {"go/scanner", "\x03s\v\x05s2\x10\x01\x13\x02"},
+ {"go/token", "\x04r\x86\x01>\x02\x03\x01\x0f\x02"},
+ {"go/types", "\x03\x01\x06i\x03\x01\x03\t\x03\x024\x063\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
+ {"go/version", "\xc1\x01|"},
+ {"hash", "\xf9\x01"},
+ {"hash/adler32", "s\x16\x16"},
+ {"hash/crc32", "s\x16\x16\x15\x8b\x01\x01\x13"},
+ {"hash/crc64", "s\x16\x16\xa0\x01"},
+ {"hash/fnv", "s\x16\x16i"},
+ {"hash/maphash", "\x89\x01\x11<}"},
+ {"html", "\xbd\x02\x02\x12"},
+ {"html/template", "\x03m\x06\x19-=\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
+ {"image", "\x02q\x1fg\x0f4\x03\x01"},
{"image/color", ""},
- {"image/color/palette", "\x8f\x01"},
- {"image/draw", "\x8e\x01\x01\x04"},
- {"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"},
- {"image/internal/imageutil", "\x8e\x01"},
- {"image/jpeg", "\x02o\x1d\x01\x04b"},
- {"image/png", "\x02\aa\n\x12\x02\x06\x01fC"},
- {"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"},
- {"internal/abi", "\xb8\x01\x97\x01"},
- {"internal/asan", "\xcf\x02"},
- {"internal/bisect", "\xae\x02\r\x01"},
- {"internal/buildcfg", "tGf\x06\x02\x05\n\x01"},
- {"internal/bytealg", "\xb1\x01\x9e\x01"},
+ {"image/color/palette", "\x92\x01"},
+ {"image/draw", "\x91\x01\x01\x04"},
+ {"image/gif", "\x02\x01\x05k\x03\x1b\x01\x01\x01\vZ\x0f"},
+ {"image/internal/imageutil", "\x91\x01"},
+ {"image/jpeg", "\x02q\x1e\x01\x04c"},
+ {"image/png", "\x02\ac\n\x13\x02\x06\x01gC"},
+ {"index/suffixarray", "\x03i\a\x86\x01\f+\n\x01"},
+ {"internal/abi", "\xbb\x01\x98\x01"},
+ {"internal/asan", "\xd3\x02"},
+ {"internal/bisect", "\xb2\x02\r\x01"},
+ {"internal/buildcfg", "vHg\x06\x02\x05\n\x01"},
+ {"internal/bytealg", "\xb4\x01\x9f\x01"},
{"internal/byteorder", ""},
{"internal/cfg", ""},
- {"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"},
- {"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"},
+ {"internal/cgrouptest", "v[T\x06\x0f\x02\x01\x04\x01"},
+ {"internal/chacha8rand", "\x9f\x01\x15\a\x98\x01"},
{"internal/copyright", ""},
{"internal/coverage", ""},
{"internal/coverage/calloc", ""},
- {"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"},
- {"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"},
- {"internal/coverage/cmerge", "t-`"},
- {"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"},
- {"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"},
- {"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"},
- {"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."},
- {"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"},
- {"internal/coverage/rtcov", "\xcf\x02"},
- {"internal/coverage/slicereader", "j\n\x81\x01Z"},
- {"internal/coverage/slicewriter", "t\x81\x01"},
- {"internal/coverage/stringtab", "t8\x04E"},
+ {"internal/coverage/cfile", "p\x06\x17\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02',\x06\a\n\x01\x03\r\x06"},
+ {"internal/coverage/cformat", "\x04r.\x04Q\v6\x01\x02\r"},
+ {"internal/coverage/cmerge", "v.a"},
+ {"internal/coverage/decodecounter", "l\n.\v\x02H,\x17\x17"},
+ {"internal/coverage/decodemeta", "\x02j\n\x17\x17\v\x02H,"},
+ {"internal/coverage/encodecounter", "\x02j\n.\f\x01\x02F\v!\x15"},
+ {"internal/coverage/encodemeta", "\x02\x01i\n\x13\x04\x17\r\x02F,."},
+ {"internal/coverage/pods", "\x04r.\x81\x01\x06\x05\n\x02\x01"},
+ {"internal/coverage/rtcov", "\xd3\x02"},
+ {"internal/coverage/slicereader", "l\n\x83\x01Z"},
+ {"internal/coverage/slicewriter", "v\x83\x01"},
+ {"internal/coverage/stringtab", "v9\x04F"},
{"internal/coverage/test", ""},
{"internal/coverage/uleb128", ""},
- {"internal/cpu", "\xcf\x02"},
- {"internal/dag", "\x04p\xc2\x01\x03"},
- {"internal/diff", "\x03q\xc3\x01\x02"},
- {"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"},
- {"internal/filepathlite", "q*A\x1a@"},
- {"internal/fmtsort", "\x04\xa5\x02\r"},
- {"internal/fuzz", "\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
+ {"internal/cpu", "\xd3\x02"},
+ {"internal/dag", "\x04r\xc4\x01\x03"},
+ {"internal/diff", "\x03s\xc5\x01\x02"},
+ {"internal/exportdata", "\x02\x01p\x03\x02e\x1c,\x01\x05\x11\x01\x02"},
+ {"internal/filepathlite", "s+B\x1a@"},
+ {"internal/fmtsort", "\x04\xa9\x02\r"},
+ {"internal/fuzz", "\x03\nG\x18\x04\x03\x03\x01\f\x036=\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
{"internal/goarch", ""},
- {"internal/godebug", "\x99\x01!\x81\x01\x01\x13"},
+ {"internal/godebug", "\x9c\x01!\x82\x01\x01\x13"},
{"internal/godebugs", ""},
{"internal/goexperiment", ""},
{"internal/goos", ""},
- {"internal/goroot", "\xa1\x02\x01\x05\x12\x02"},
+ {"internal/goroot", "\xa5\x02\x01\x05\x12\x02"},
{"internal/gover", "\x04"},
{"internal/goversion", ""},
- {"internal/lazyregexp", "\xa1\x02\v\r\x02"},
- {"internal/lazytemplate", "\xf5\x01,\x18\x02\f"},
- {"internal/msan", "\xcf\x02"},
+ {"internal/lazyregexp", "\xa5\x02\v\r\x02"},
+ {"internal/lazytemplate", "\xf9\x01,\x18\x02\f"},
+ {"internal/msan", "\xd3\x02"},
{"internal/nettrace", ""},
- {"internal/obscuretestdata", "i\x8c\x01,"},
- {"internal/oserror", "q"},
- {"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"},
+ {"internal/obscuretestdata", "k\x8e\x01,"},
+ {"internal/oserror", "s"},
+ {"internal/pkgbits", "\x03Q\x18\a\x03\x04\fs\r\x1f\r\n\x01"},
{"internal/platform", ""},
- {"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"},
- {"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"},
+ {"internal/poll", "sl\x05\x159\r\x01\x01\f\x06"},
+ {"internal/profile", "\x03\x04l\x03\x83\x017\n\x01\x01\x01\x10"},
{"internal/profilerecord", ""},
- {"internal/race", "\x97\x01\xb8\x01"},
- {"internal/reflectlite", "\x97\x01!:\x16"},
- {"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"},
- {"weak", "\x97\x01\x97\x01!"},
+ {"vendor/golang.org/x/crypto/internal/alias", "\xd3\x02"},
+ {"vendor/golang.org/x/crypto/internal/poly1305", "W\x15\x9c\x01"},
+ {"vendor/golang.org/x/net/dns/dnsmessage", "s\xc7\x01"},
+ {"vendor/golang.org/x/net/http/httpguts", "\x8f\x02\x14\x1a\x14\r"},
+ {"vendor/golang.org/x/net/http/httpproxy", "s\x03\x99\x01\x10\x05\x01\x18\x14\r"},
+ {"vendor/golang.org/x/net/http2/hpack", "\x03p\x03\x83\x01F"},
+ {"vendor/golang.org/x/net/idna", "v\x8f\x018\x14\x10\x02\x01"},
+ {"vendor/golang.org/x/net/nettest", "\x03i\a\x03\x83\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"},
+ {"vendor/golang.org/x/sys/cpu", "\xa5\x02\r\n\x01\x16"},
+ {"vendor/golang.org/x/text/secure/bidirule", "s\xde\x01\x11\x01"},
+ {"vendor/golang.org/x/text/transform", "\x03p\x86\x01X"},
+ {"vendor/golang.org/x/text/unicode/bidi", "\x03\bk\x87\x01>\x16"},
+ {"vendor/golang.org/x/text/unicode/norm", "l\n\x83\x01F\x12\x11"},
+ {"weak", "\x9a\x01\x98\x01!"},
}
// bootstrap is the list of bootstrap packages extracted from cmd/dist.
@@ -385,6 +389,7 @@ var bootstrap = map[string]bool{
"cmd/compile/internal/arm64": true,
"cmd/compile/internal/base": true,
"cmd/compile/internal/bitvec": true,
+ "cmd/compile/internal/bloop": true,
"cmd/compile/internal/compare": true,
"cmd/compile/internal/coverage": true,
"cmd/compile/internal/deadlocals": true,
@@ -413,6 +418,7 @@ var bootstrap = map[string]bool{
"cmd/compile/internal/riscv64": true,
"cmd/compile/internal/rttype": true,
"cmd/compile/internal/s390x": true,
+ "cmd/compile/internal/slice": true,
"cmd/compile/internal/ssa": true,
"cmd/compile/internal/ssagen": true,
"cmd/compile/internal/staticdata": true,
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index 362f23c43..f1e24625a 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -16,6 +16,14 @@ var PackageSymbols = map[string][]Symbol{
{"(*Writer).Flush", Method, 0, ""},
{"(*Writer).Write", Method, 0, ""},
{"(*Writer).WriteHeader", Method, 0, ""},
+ {"(FileInfoNames).Gname", Method, 23, ""},
+ {"(FileInfoNames).IsDir", Method, 23, ""},
+ {"(FileInfoNames).ModTime", Method, 23, ""},
+ {"(FileInfoNames).Mode", Method, 23, ""},
+ {"(FileInfoNames).Name", Method, 23, ""},
+ {"(FileInfoNames).Size", Method, 23, ""},
+ {"(FileInfoNames).Sys", Method, 23, ""},
+ {"(FileInfoNames).Uname", Method, 23, ""},
{"(Format).String", Method, 10, ""},
{"ErrFieldTooLong", Var, 0, ""},
{"ErrHeader", Var, 0, ""},
@@ -338,6 +346,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*Writer).Write", Method, 0, ""},
{"(CorruptInputError).Error", Method, 0, ""},
{"(InternalError).Error", Method, 0, ""},
+ {"(Reader).Read", Method, 0, ""},
+ {"(Reader).ReadByte", Method, 0, ""},
+ {"(Resetter).Reset", Method, 4, ""},
{"BestCompression", Const, 0, ""},
{"BestSpeed", Const, 0, ""},
{"CorruptInputError", Type, 0, ""},
@@ -409,6 +420,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Writer).Flush", Method, 0, ""},
{"(*Writer).Reset", Method, 2, ""},
{"(*Writer).Write", Method, 0, ""},
+ {"(Resetter).Reset", Method, 4, ""},
{"BestCompression", Const, 0, ""},
{"BestSpeed", Const, 0, ""},
{"DefaultCompression", Const, 0, ""},
@@ -426,6 +438,11 @@ var PackageSymbols = map[string][]Symbol{
{"Writer", Type, 0, ""},
},
"container/heap": {
+ {"(Interface).Len", Method, 0, ""},
+ {"(Interface).Less", Method, 0, ""},
+ {"(Interface).Pop", Method, 0, ""},
+ {"(Interface).Push", Method, 0, ""},
+ {"(Interface).Swap", Method, 0, ""},
{"Fix", Func, 2, "func(h Interface, i int)"},
{"Init", Func, 0, "func(h Interface)"},
{"Interface", Type, 0, ""},
@@ -469,6 +486,10 @@ var PackageSymbols = map[string][]Symbol{
{"Ring.Value", Field, 0, ""},
},
"context": {
+ {"(Context).Deadline", Method, 7, ""},
+ {"(Context).Done", Method, 7, ""},
+ {"(Context).Err", Method, 7, ""},
+ {"(Context).Value", Method, 7, ""},
{"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"},
{"Background", Func, 7, "func() Context"},
{"CancelCauseFunc", Type, 20, ""},
@@ -488,17 +509,31 @@ var PackageSymbols = map[string][]Symbol{
{"WithoutCancel", Func, 21, "func(parent Context) Context"},
},
"crypto": {
+ {"(Decapsulator).Decapsulate", Method, 26, ""},
+ {"(Decapsulator).Encapsulator", Method, 26, ""},
+ {"(Decrypter).Decrypt", Method, 5, ""},
+ {"(Decrypter).Public", Method, 5, ""},
+ {"(Encapsulator).Bytes", Method, 26, ""},
+ {"(Encapsulator).Encapsulate", Method, 26, ""},
{"(Hash).Available", Method, 0, ""},
{"(Hash).HashFunc", Method, 4, ""},
{"(Hash).New", Method, 0, ""},
{"(Hash).Size", Method, 0, ""},
{"(Hash).String", Method, 15, ""},
+ {"(MessageSigner).Public", Method, 25, ""},
+ {"(MessageSigner).Sign", Method, 25, ""},
+ {"(MessageSigner).SignMessage", Method, 25, ""},
+ {"(Signer).Public", Method, 4, ""},
+ {"(Signer).Sign", Method, 4, ""},
+ {"(SignerOpts).HashFunc", Method, 4, ""},
{"BLAKE2b_256", Const, 9, ""},
{"BLAKE2b_384", Const, 9, ""},
{"BLAKE2b_512", Const, 9, ""},
{"BLAKE2s_256", Const, 9, ""},
+ {"Decapsulator", Type, 26, ""},
{"Decrypter", Type, 5, ""},
{"DecrypterOpts", Type, 5, ""},
+ {"Encapsulator", Type, 26, ""},
{"Hash", Type, 0, ""},
{"MD4", Const, 0, ""},
{"MD5", Const, 0, ""},
@@ -530,6 +565,16 @@ var PackageSymbols = map[string][]Symbol{
{"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
},
"crypto/cipher": {
+ {"(AEAD).NonceSize", Method, 2, ""},
+ {"(AEAD).Open", Method, 2, ""},
+ {"(AEAD).Overhead", Method, 2, ""},
+ {"(AEAD).Seal", Method, 2, ""},
+ {"(Block).BlockSize", Method, 0, ""},
+ {"(Block).Decrypt", Method, 0, ""},
+ {"(Block).Encrypt", Method, 0, ""},
+ {"(BlockMode).BlockSize", Method, 0, ""},
+ {"(BlockMode).CryptBlocks", Method, 0, ""},
+ {"(Stream).XORKeyStream", Method, 0, ""},
{"(StreamReader).Read", Method, 0, ""},
{"(StreamWriter).Close", Method, 0, ""},
{"(StreamWriter).Write", Method, 0, ""},
@@ -594,7 +639,13 @@ var PackageSymbols = map[string][]Symbol{
{"(*PublicKey).Bytes", Method, 20, ""},
{"(*PublicKey).Curve", Method, 20, ""},
{"(*PublicKey).Equal", Method, 20, ""},
- {"Curve", Type, 20, ""},
+ {"(Curve).GenerateKey", Method, 20, ""},
+ {"(Curve).NewPrivateKey", Method, 20, ""},
+ {"(Curve).NewPublicKey", Method, 20, ""},
+ {"(KeyExchanger).Curve", Method, 26, ""},
+ {"(KeyExchanger).ECDH", Method, 26, ""},
+ {"(KeyExchanger).PublicKey", Method, 26, ""},
+ {"KeyExchanger", Type, 26, ""},
{"P256", Func, 20, "func() Curve"},
{"P384", Func, 20, "func() Curve"},
{"P521", Func, 20, "func() Curve"},
@@ -667,6 +718,12 @@ var PackageSymbols = map[string][]Symbol{
{"(*CurveParams).Params", Method, 0, ""},
{"(*CurveParams).ScalarBaseMult", Method, 0, ""},
{"(*CurveParams).ScalarMult", Method, 0, ""},
+ {"(Curve).Add", Method, 0, ""},
+ {"(Curve).Double", Method, 0, ""},
+ {"(Curve).IsOnCurve", Method, 0, ""},
+ {"(Curve).Params", Method, 0, ""},
+ {"(Curve).ScalarBaseMult", Method, 0, ""},
+ {"(Curve).ScalarMult", Method, 0, ""},
{"Curve", Type, 0, ""},
{"CurveParams", Type, 0, ""},
{"CurveParams.B", Field, 0, ""},
@@ -688,6 +745,7 @@ var PackageSymbols = map[string][]Symbol{
},
"crypto/fips140": {
{"Enabled", Func, 24, "func() bool"},
+ {"Version", Func, 26, "func() string"},
},
"crypto/hkdf": {
{"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"},
@@ -708,9 +766,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*DecapsulationKey1024).Bytes", Method, 24, ""},
{"(*DecapsulationKey1024).Decapsulate", Method, 24, ""},
{"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""},
+ {"(*DecapsulationKey1024).Encapsulator", Method, 26, ""},
{"(*DecapsulationKey768).Bytes", Method, 24, ""},
{"(*DecapsulationKey768).Decapsulate", Method, 24, ""},
{"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""},
+ {"(*DecapsulationKey768).Encapsulator", Method, 26, ""},
{"(*EncapsulationKey1024).Bytes", Method, 24, ""},
{"(*EncapsulationKey1024).Encapsulate", Method, 24, ""},
{"(*EncapsulationKey768).Bytes", Method, 24, ""},
@@ -732,6 +792,10 @@ var PackageSymbols = map[string][]Symbol{
{"SeedSize", Const, 24, ""},
{"SharedKeySize", Const, 24, ""},
},
+ "crypto/mlkem/mlkemtest": {
+ {"Encapsulate1024", Func, 26, "func(ek *mlkem.EncapsulationKey1024, random []byte) (sharedKey []byte, ciphertext []byte, err error)"},
+ {"Encapsulate768", Func, 26, "func(ek *mlkem.EncapsulationKey768, random []byte) (sharedKey []byte, ciphertext []byte, err error)"},
+ },
"crypto/pbkdf2": {
{"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"},
},
@@ -769,6 +833,7 @@ var PackageSymbols = map[string][]Symbol{
{"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"},
{"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"},
{"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"},
+ {"EncryptOAEPWithOptions", Func, 26, "func(random io.Reader, pub *PublicKey, msg []byte, opts *OAEPOptions) ([]byte, error)"},
{"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"},
{"ErrDecryption", Var, 0, ""},
{"ErrMessageTooLong", Var, 0, ""},
@@ -921,6 +986,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*SessionState).Bytes", Method, 21, ""},
{"(AlertError).Error", Method, 21, ""},
{"(ClientAuthType).String", Method, 15, ""},
+ {"(ClientSessionCache).Get", Method, 3, ""},
+ {"(ClientSessionCache).Put", Method, 3, ""},
{"(CurveID).String", Method, 15, ""},
{"(QUICEncryptionLevel).String", Method, 21, ""},
{"(RecordHeaderError).Error", Method, 6, ""},
@@ -953,6 +1020,7 @@ var PackageSymbols = map[string][]Symbol{
{"ClientHelloInfo.CipherSuites", Field, 4, ""},
{"ClientHelloInfo.Conn", Field, 8, ""},
{"ClientHelloInfo.Extensions", Field, 24, ""},
+ {"ClientHelloInfo.HelloRetryRequest", Field, 26, ""},
{"ClientHelloInfo.ServerName", Field, 4, ""},
{"ClientHelloInfo.SignatureSchemes", Field, 8, ""},
{"ClientHelloInfo.SupportedCurves", Field, 4, ""},
@@ -1001,6 +1069,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConnectionState.DidResume", Field, 1, ""},
{"ConnectionState.ECHAccepted", Field, 23, ""},
{"ConnectionState.HandshakeComplete", Field, 0, ""},
+ {"ConnectionState.HelloRetryRequest", Field, 26, ""},
{"ConnectionState.NegotiatedProtocol", Field, 0, ""},
{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""},
{"ConnectionState.OCSPResponse", Field, 5, ""},
@@ -1055,8 +1124,10 @@ var PackageSymbols = map[string][]Symbol{
{"QUICEncryptionLevelEarly", Const, 21, ""},
{"QUICEncryptionLevelHandshake", Const, 21, ""},
{"QUICEncryptionLevelInitial", Const, 21, ""},
+ {"QUICErrorEvent", Const, 26, ""},
{"QUICEvent", Type, 21, ""},
{"QUICEvent.Data", Field, 21, ""},
+ {"QUICEvent.Err", Field, 26, ""},
{"QUICEvent.Kind", Field, 21, ""},
{"QUICEvent.Level", Field, 21, ""},
{"QUICEvent.SessionState", Field, 23, ""},
@@ -1151,8 +1222,10 @@ var PackageSymbols = map[string][]Symbol{
{"(*RevocationList).CheckSignatureFrom", Method, 19, ""},
{"(CertificateInvalidError).Error", Method, 0, ""},
{"(ConstraintViolationError).Error", Method, 0, ""},
+ {"(ExtKeyUsage).String", Method, 26, ""},
{"(HostnameError).Error", Method, 0, ""},
{"(InsecureAlgorithmError).Error", Method, 6, ""},
+ {"(KeyUsage).String", Method, 26, ""},
{"(OID).AppendBinary", Method, 24, ""},
{"(OID).AppendText", Method, 24, ""},
{"(OID).Equal", Method, 22, ""},
@@ -1516,6 +1589,9 @@ var PackageSymbols = map[string][]Symbol{
{"(NullInt64).Value", Method, 0, ""},
{"(NullString).Value", Method, 0, ""},
{"(NullTime).Value", Method, 13, ""},
+ {"(Result).LastInsertId", Method, 0, ""},
+ {"(Result).RowsAffected", Method, 0, ""},
+ {"(Scanner).Scan", Method, 0, ""},
{"ColumnType", Type, 8, ""},
{"Conn", Type, 9, ""},
{"DB", Type, 0, ""},
@@ -1547,8 +1623,6 @@ var PackageSymbols = map[string][]Symbol{
{"NamedArg.Name", Field, 8, ""},
{"NamedArg.Value", Field, 8, ""},
{"Null", Type, 22, ""},
- {"Null.V", Field, 22, ""},
- {"Null.Valid", Field, 22, ""},
{"NullBool", Type, 0, ""},
{"NullBool.Bool", Field, 0, ""},
{"NullBool.Valid", Field, 0, ""},
@@ -1591,10 +1665,72 @@ var PackageSymbols = map[string][]Symbol{
{"TxOptions.ReadOnly", Field, 8, ""},
},
"database/sql/driver": {
+ {"(ColumnConverter).ColumnConverter", Method, 0, ""},
+ {"(Conn).Begin", Method, 0, ""},
+ {"(Conn).Close", Method, 0, ""},
+ {"(Conn).Prepare", Method, 0, ""},
+ {"(ConnBeginTx).BeginTx", Method, 8, ""},
+ {"(ConnPrepareContext).PrepareContext", Method, 8, ""},
+ {"(Connector).Connect", Method, 10, ""},
+ {"(Connector).Driver", Method, 10, ""},
+ {"(Driver).Open", Method, 0, ""},
+ {"(DriverContext).OpenConnector", Method, 10, ""},
+ {"(Execer).Exec", Method, 0, ""},
+ {"(ExecerContext).ExecContext", Method, 8, ""},
+ {"(NamedValueChecker).CheckNamedValue", Method, 9, ""},
{"(NotNull).ConvertValue", Method, 0, ""},
{"(Null).ConvertValue", Method, 0, ""},
+ {"(Pinger).Ping", Method, 8, ""},
+ {"(Queryer).Query", Method, 1, ""},
+ {"(QueryerContext).QueryContext", Method, 8, ""},
+ {"(Result).LastInsertId", Method, 0, ""},
+ {"(Result).RowsAffected", Method, 0, ""},
+ {"(Rows).Close", Method, 0, ""},
+ {"(Rows).Columns", Method, 0, ""},
+ {"(Rows).Next", Method, 0, ""},
{"(RowsAffected).LastInsertId", Method, 0, ""},
{"(RowsAffected).RowsAffected", Method, 0, ""},
+ {"(RowsColumnScanner).Close", Method, 26, ""},
+ {"(RowsColumnScanner).Columns", Method, 26, ""},
+ {"(RowsColumnScanner).Next", Method, 26, ""},
+ {"(RowsColumnScanner).ScanColumn", Method, 26, ""},
+ {"(RowsColumnTypeDatabaseTypeName).Close", Method, 8, ""},
+ {"(RowsColumnTypeDatabaseTypeName).ColumnTypeDatabaseTypeName", Method, 8, ""},
+ {"(RowsColumnTypeDatabaseTypeName).Columns", Method, 8, ""},
+ {"(RowsColumnTypeDatabaseTypeName).Next", Method, 8, ""},
+ {"(RowsColumnTypeLength).Close", Method, 8, ""},
+ {"(RowsColumnTypeLength).ColumnTypeLength", Method, 8, ""},
+ {"(RowsColumnTypeLength).Columns", Method, 8, ""},
+ {"(RowsColumnTypeLength).Next", Method, 8, ""},
+ {"(RowsColumnTypeNullable).Close", Method, 8, ""},
+ {"(RowsColumnTypeNullable).ColumnTypeNullable", Method, 8, ""},
+ {"(RowsColumnTypeNullable).Columns", Method, 8, ""},
+ {"(RowsColumnTypeNullable).Next", Method, 8, ""},
+ {"(RowsColumnTypePrecisionScale).Close", Method, 8, ""},
+ {"(RowsColumnTypePrecisionScale).ColumnTypePrecisionScale", Method, 8, ""},
+ {"(RowsColumnTypePrecisionScale).Columns", Method, 8, ""},
+ {"(RowsColumnTypePrecisionScale).Next", Method, 8, ""},
+ {"(RowsColumnTypeScanType).Close", Method, 8, ""},
+ {"(RowsColumnTypeScanType).ColumnTypeScanType", Method, 8, ""},
+ {"(RowsColumnTypeScanType).Columns", Method, 8, ""},
+ {"(RowsColumnTypeScanType).Next", Method, 8, ""},
+ {"(RowsNextResultSet).Close", Method, 8, ""},
+ {"(RowsNextResultSet).Columns", Method, 8, ""},
+ {"(RowsNextResultSet).HasNextResultSet", Method, 8, ""},
+ {"(RowsNextResultSet).Next", Method, 8, ""},
+ {"(RowsNextResultSet).NextResultSet", Method, 8, ""},
+ {"(SessionResetter).ResetSession", Method, 10, ""},
+ {"(Stmt).Close", Method, 0, ""},
+ {"(Stmt).Exec", Method, 0, ""},
+ {"(Stmt).NumInput", Method, 0, ""},
+ {"(Stmt).Query", Method, 0, ""},
+ {"(StmtExecContext).ExecContext", Method, 8, ""},
+ {"(StmtQueryContext).QueryContext", Method, 8, ""},
+ {"(Tx).Commit", Method, 0, ""},
+ {"(Tx).Rollback", Method, 0, ""},
+ {"(Validator).IsValid", Method, 15, ""},
+ {"(ValueConverter).ConvertValue", Method, 0, ""},
+ {"(Valuer).Value", Method, 0, ""},
{"Bool", Var, 0, ""},
{"ColumnConverter", Type, 0, ""},
{"Conn", Type, 0, ""},
@@ -1756,6 +1892,9 @@ var PackageSymbols = map[string][]Symbol{
{"(DecodeError).Error", Method, 0, ""},
{"(Tag).GoString", Method, 0, ""},
{"(Tag).String", Method, 0, ""},
+ {"(Type).Common", Method, 0, ""},
+ {"(Type).Size", Method, 0, ""},
+ {"(Type).String", Method, 0, ""},
{"AddrType", Type, 0, ""},
{"AddrType.BasicType", Field, 0, ""},
{"ArrayType", Type, 0, ""},
@@ -3163,6 +3302,7 @@ var PackageSymbols = map[string][]Symbol{
{"R_LARCH_B16", Const, 20, ""},
{"R_LARCH_B21", Const, 20, ""},
{"R_LARCH_B26", Const, 20, ""},
+ {"R_LARCH_CALL36", Const, 26, ""},
{"R_LARCH_CFA", Const, 22, ""},
{"R_LARCH_COPY", Const, 19, ""},
{"R_LARCH_DELETE", Const, 22, ""},
@@ -3220,11 +3360,25 @@ var PackageSymbols = map[string][]Symbol{
{"R_LARCH_SUB64", Const, 19, ""},
{"R_LARCH_SUB8", Const, 19, ""},
{"R_LARCH_SUB_ULEB128", Const, 22, ""},
+ {"R_LARCH_TLS_DESC32", Const, 26, ""},
+ {"R_LARCH_TLS_DESC64", Const, 26, ""},
+ {"R_LARCH_TLS_DESC64_HI12", Const, 26, ""},
+ {"R_LARCH_TLS_DESC64_LO20", Const, 26, ""},
+ {"R_LARCH_TLS_DESC64_PC_HI12", Const, 26, ""},
+ {"R_LARCH_TLS_DESC64_PC_LO20", Const, 26, ""},
+ {"R_LARCH_TLS_DESC_CALL", Const, 26, ""},
+ {"R_LARCH_TLS_DESC_HI20", Const, 26, ""},
+ {"R_LARCH_TLS_DESC_LD", Const, 26, ""},
+ {"R_LARCH_TLS_DESC_LO12", Const, 26, ""},
+ {"R_LARCH_TLS_DESC_PCREL20_S2", Const, 26, ""},
+ {"R_LARCH_TLS_DESC_PC_HI20", Const, 26, ""},
+ {"R_LARCH_TLS_DESC_PC_LO12", Const, 26, ""},
{"R_LARCH_TLS_DTPMOD32", Const, 19, ""},
{"R_LARCH_TLS_DTPMOD64", Const, 19, ""},
{"R_LARCH_TLS_DTPREL32", Const, 19, ""},
{"R_LARCH_TLS_DTPREL64", Const, 19, ""},
{"R_LARCH_TLS_GD_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_GD_PCREL20_S2", Const, 26, ""},
{"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""},
{"R_LARCH_TLS_IE64_HI12", Const, 20, ""},
{"R_LARCH_TLS_IE64_LO20", Const, 20, ""},
@@ -3235,11 +3389,15 @@ var PackageSymbols = map[string][]Symbol{
{"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""},
{"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""},
{"R_LARCH_TLS_LD_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LD_PCREL20_S2", Const, 26, ""},
{"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""},
{"R_LARCH_TLS_LE64_HI12", Const, 20, ""},
{"R_LARCH_TLS_LE64_LO20", Const, 20, ""},
+ {"R_LARCH_TLS_LE_ADD_R", Const, 26, ""},
{"R_LARCH_TLS_LE_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LE_HI20_R", Const, 26, ""},
{"R_LARCH_TLS_LE_LO12", Const, 20, ""},
+ {"R_LARCH_TLS_LE_LO12_R", Const, 26, ""},
{"R_LARCH_TLS_TPREL32", Const, 19, ""},
{"R_LARCH_TLS_TPREL64", Const, 19, ""},
{"R_MIPS", Type, 6, ""},
@@ -3944,6 +4102,7 @@ var PackageSymbols = map[string][]Symbol{
{"(FatArch).ImportedSymbols", Method, 3, ""},
{"(FatArch).Section", Method, 3, ""},
{"(FatArch).Segment", Method, 3, ""},
+ {"(Load).Raw", Method, 0, ""},
{"(LoadBytes).Raw", Method, 0, ""},
{"(LoadCmd).GoString", Method, 0, ""},
{"(LoadCmd).String", Method, 0, ""},
@@ -4590,6 +4749,12 @@ var PackageSymbols = map[string][]Symbol{
{"FS", Type, 16, ""},
},
"encoding": {
+ {"(BinaryAppender).AppendBinary", Method, 24, ""},
+ {"(BinaryMarshaler).MarshalBinary", Method, 2, ""},
+ {"(BinaryUnmarshaler).UnmarshalBinary", Method, 2, ""},
+ {"(TextAppender).AppendText", Method, 24, ""},
+ {"(TextMarshaler).MarshalText", Method, 2, ""},
+ {"(TextUnmarshaler).UnmarshalText", Method, 2, ""},
{"BinaryAppender", Type, 24, ""},
{"BinaryMarshaler", Type, 2, ""},
{"BinaryUnmarshaler", Type, 2, ""},
@@ -4705,6 +4870,17 @@ var PackageSymbols = map[string][]Symbol{
{"URLEncoding", Var, 0, ""},
},
"encoding/binary": {
+ {"(AppendByteOrder).AppendUint16", Method, 19, ""},
+ {"(AppendByteOrder).AppendUint32", Method, 19, ""},
+ {"(AppendByteOrder).AppendUint64", Method, 19, ""},
+ {"(AppendByteOrder).String", Method, 19, ""},
+ {"(ByteOrder).PutUint16", Method, 0, ""},
+ {"(ByteOrder).PutUint32", Method, 0, ""},
+ {"(ByteOrder).PutUint64", Method, 0, ""},
+ {"(ByteOrder).String", Method, 0, ""},
+ {"(ByteOrder).Uint16", Method, 0, ""},
+ {"(ByteOrder).Uint32", Method, 0, ""},
+ {"(ByteOrder).Uint64", Method, 0, ""},
{"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"},
{"AppendByteOrder", Type, 19, ""},
{"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"},
@@ -4767,6 +4943,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*Decoder).DecodeValue", Method, 0, ""},
{"(*Encoder).Encode", Method, 0, ""},
{"(*Encoder).EncodeValue", Method, 0, ""},
+ {"(GobDecoder).GobDecode", Method, 0, ""},
+ {"(GobEncoder).GobEncode", Method, 0, ""},
{"CommonType", Type, 0, ""},
{"CommonType.Id", Field, 0, ""},
{"CommonType.Name", Field, 0, ""},
@@ -4819,10 +4997,12 @@ var PackageSymbols = map[string][]Symbol{
{"(*UnsupportedTypeError).Error", Method, 0, ""},
{"(*UnsupportedValueError).Error", Method, 0, ""},
{"(Delim).String", Method, 5, ""},
+ {"(Marshaler).MarshalJSON", Method, 0, ""},
{"(Number).Float64", Method, 1, ""},
{"(Number).Int64", Method, 1, ""},
{"(Number).String", Method, 1, ""},
{"(RawMessage).MarshalJSON", Method, 8, ""},
+ {"(Unmarshaler).UnmarshalJSON", Method, 0, ""},
{"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"},
{"Decoder", Type, 0, ""},
{"Delim", Type, 5, ""},
@@ -4894,10 +5074,15 @@ var PackageSymbols = map[string][]Symbol{
{"(CharData).Copy", Method, 0, ""},
{"(Comment).Copy", Method, 0, ""},
{"(Directive).Copy", Method, 0, ""},
+ {"(Marshaler).MarshalXML", Method, 2, ""},
+ {"(MarshalerAttr).MarshalXMLAttr", Method, 2, ""},
{"(ProcInst).Copy", Method, 0, ""},
{"(StartElement).Copy", Method, 0, ""},
{"(StartElement).End", Method, 2, ""},
+ {"(TokenReader).Token", Method, 10, ""},
{"(UnmarshalError).Error", Method, 0, ""},
+ {"(Unmarshaler).UnmarshalXML", Method, 2, ""},
+ {"(UnmarshalerAttr).UnmarshalXMLAttr", Method, 2, ""},
{"Attr", Type, 0, ""},
{"Attr.Name", Field, 0, ""},
{"Attr.Value", Field, 0, ""},
@@ -4984,6 +5169,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*String).Value", Method, 8, ""},
{"(Func).String", Method, 0, ""},
{"(Func).Value", Method, 8, ""},
+ {"(Var).String", Method, 0, ""},
{"Do", Func, 0, "func(f func(KeyValue))"},
{"Float", Type, 0, ""},
{"Func", Type, 0, ""},
@@ -5039,6 +5225,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*FlagSet).Var", Method, 0, ""},
{"(*FlagSet).Visit", Method, 0, ""},
{"(*FlagSet).VisitAll", Method, 0, ""},
+ {"(Getter).Get", Method, 2, ""},
+ {"(Getter).Set", Method, 2, ""},
+ {"(Getter).String", Method, 2, ""},
+ {"(Value).Set", Method, 0, ""},
+ {"(Value).String", Method, 0, ""},
{"Arg", Func, 0, "func(i int) string"},
{"Args", Func, 0, "func() []string"},
{"Bool", Func, 0, "func(name string, value bool, usage string) *bool"},
@@ -5090,6 +5281,20 @@ var PackageSymbols = map[string][]Symbol{
{"VisitAll", Func, 0, "func(fn func(*Flag))"},
},
"fmt": {
+ {"(Formatter).Format", Method, 0, ""},
+ {"(GoStringer).GoString", Method, 0, ""},
+ {"(ScanState).Read", Method, 0, ""},
+ {"(ScanState).ReadRune", Method, 0, ""},
+ {"(ScanState).SkipSpace", Method, 0, ""},
+ {"(ScanState).Token", Method, 0, ""},
+ {"(ScanState).UnreadRune", Method, 0, ""},
+ {"(ScanState).Width", Method, 0, ""},
+ {"(Scanner).Scan", Method, 0, ""},
+ {"(State).Flag", Method, 0, ""},
+ {"(State).Precision", Method, 0, ""},
+ {"(State).Width", Method, 0, ""},
+ {"(State).Write", Method, 0, ""},
+ {"(Stringer).String", Method, 0, ""},
{"Append", Func, 19, "func(b []byte, a ...any) []byte"},
{"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"},
{"Appendln", Func, 19, "func(b []byte, a ...any) []byte"},
@@ -5248,7 +5453,18 @@ var PackageSymbols = map[string][]Symbol{
{"(CommentMap).Filter", Method, 1, ""},
{"(CommentMap).String", Method, 1, ""},
{"(CommentMap).Update", Method, 1, ""},
+ {"(Decl).End", Method, 0, ""},
+ {"(Decl).Pos", Method, 0, ""},
+ {"(Expr).End", Method, 0, ""},
+ {"(Expr).Pos", Method, 0, ""},
+ {"(Node).End", Method, 0, ""},
+ {"(Node).Pos", Method, 0, ""},
{"(ObjKind).String", Method, 0, ""},
+ {"(Spec).End", Method, 0, ""},
+ {"(Spec).Pos", Method, 0, ""},
+ {"(Stmt).End", Method, 0, ""},
+ {"(Stmt).Pos", Method, 0, ""},
+ {"(Visitor).Visit", Method, 0, ""},
{"ArrayType", Type, 0, ""},
{"ArrayType.Elt", Field, 0, ""},
{"ArrayType.Lbrack", Field, 0, ""},
@@ -5271,6 +5487,7 @@ var PackageSymbols = map[string][]Symbol{
{"BasicLit", Type, 0, ""},
{"BasicLit.Kind", Field, 0, ""},
{"BasicLit.Value", Field, 0, ""},
+ {"BasicLit.ValueEnd", Field, 26, ""},
{"BasicLit.ValuePos", Field, 0, ""},
{"BinaryExpr", Type, 0, ""},
{"BinaryExpr.Op", Field, 0, ""},
@@ -5320,7 +5537,6 @@ var PackageSymbols = map[string][]Symbol{
{"CompositeLit.Rbrace", Field, 0, ""},
{"CompositeLit.Type", Field, 0, ""},
{"Con", Const, 0, ""},
- {"Decl", Type, 0, ""},
{"DeclStmt", Type, 0, ""},
{"DeclStmt.Decl", Field, 0, ""},
{"DeferStmt", Type, 0, ""},
@@ -5341,7 +5557,6 @@ var PackageSymbols = map[string][]Symbol{
{"EmptyStmt", Type, 0, ""},
{"EmptyStmt.Implicit", Field, 5, ""},
{"EmptyStmt.Semicolon", Field, 0, ""},
- {"Expr", Type, 0, ""},
{"ExprStmt", Type, 0, ""},
{"ExprStmt.X", Field, 0, ""},
{"Field", Type, 0, ""},
@@ -5525,11 +5740,9 @@ var PackageSymbols = map[string][]Symbol{
{"SliceExpr.Slice3", Field, 2, ""},
{"SliceExpr.X", Field, 0, ""},
{"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"},
- {"Spec", Type, 0, ""},
{"StarExpr", Type, 0, ""},
{"StarExpr.Star", Field, 0, ""},
{"StarExpr.X", Field, 0, ""},
- {"Stmt", Type, 0, ""},
{"StructType", Type, 0, ""},
{"StructType.Fields", Field, 0, ""},
{"StructType.Incomplete", Field, 0, ""},
@@ -5684,10 +5897,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*SyntaxError).Error", Method, 16, ""},
{"(*TagExpr).Eval", Method, 16, ""},
{"(*TagExpr).String", Method, 16, ""},
+ {"(Expr).Eval", Method, 16, ""},
+ {"(Expr).String", Method, 16, ""},
{"AndExpr", Type, 16, ""},
{"AndExpr.X", Field, 16, ""},
{"AndExpr.Y", Field, 16, ""},
- {"Expr", Type, 16, ""},
{"GoVersion", Func, 21, "func(x Expr) string"},
{"IsGoBuild", Func, 16, "func(line string) bool"},
{"IsPlusBuild", Func, 16, "func(line string) bool"},
@@ -5706,6 +5920,9 @@ var PackageSymbols = map[string][]Symbol{
},
"go/constant": {
{"(Kind).String", Method, 18, ""},
+ {"(Value).ExactString", Method, 6, ""},
+ {"(Value).Kind", Method, 5, ""},
+ {"(Value).String", Method, 5, ""},
{"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"},
{"BitLen", Func, 5, "func(x Value) int"},
{"Bool", Const, 5, ""},
@@ -5744,7 +5961,6 @@ var PackageSymbols = map[string][]Symbol{
{"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"},
{"Unknown", Const, 5, ""},
{"Val", Func, 13, "func(x Value) any"},
- {"Value", Type, 5, ""},
},
"go/doc": {
{"(*Package).Filter", Method, 0, ""},
@@ -5828,7 +6044,6 @@ var PackageSymbols = map[string][]Symbol{
{"(*Printer).HTML", Method, 19, ""},
{"(*Printer).Markdown", Method, 19, ""},
{"(*Printer).Text", Method, 19, ""},
- {"Block", Type, 19, ""},
{"Code", Type, 19, ""},
{"Code.Text", Field, 19, ""},
{"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"},
@@ -5873,7 +6088,6 @@ var PackageSymbols = map[string][]Symbol{
{"Printer.TextCodePrefix", Field, 19, ""},
{"Printer.TextPrefix", Field, 19, ""},
{"Printer.TextWidth", Field, 19, ""},
- {"Text", Type, 19, ""},
},
"go/format": {
{"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"},
@@ -5945,6 +6159,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*File).AddLineColumnInfo", Method, 11, ""},
{"(*File).AddLineInfo", Method, 0, ""},
{"(*File).Base", Method, 0, ""},
+ {"(*File).End", Method, 26, ""},
{"(*File).Line", Method, 0, ""},
{"(*File).LineCount", Method, 0, ""},
{"(*File).LineStart", Method, 12, ""},
@@ -6307,6 +6522,22 @@ var PackageSymbols = map[string][]Symbol{
{"(Checker).PkgNameOf", Method, 22, ""},
{"(Checker).TypeOf", Method, 5, ""},
{"(Error).Error", Method, 5, ""},
+ {"(Importer).Import", Method, 5, ""},
+ {"(ImporterFrom).Import", Method, 6, ""},
+ {"(ImporterFrom).ImportFrom", Method, 6, ""},
+ {"(Object).Exported", Method, 5, ""},
+ {"(Object).Id", Method, 5, ""},
+ {"(Object).Name", Method, 5, ""},
+ {"(Object).Parent", Method, 5, ""},
+ {"(Object).Pkg", Method, 5, ""},
+ {"(Object).Pos", Method, 5, ""},
+ {"(Object).String", Method, 5, ""},
+ {"(Object).Type", Method, 5, ""},
+ {"(Sizes).Alignof", Method, 5, ""},
+ {"(Sizes).Offsetsof", Method, 5, ""},
+ {"(Sizes).Sizeof", Method, 5, ""},
+ {"(Type).String", Method, 5, ""},
+ {"(Type).Underlying", Method, 5, ""},
{"(TypeAndValue).Addressable", Method, 5, ""},
{"(TypeAndValue).Assignable", Method, 5, ""},
{"(TypeAndValue).HasOk", Method, 5, ""},
@@ -6445,7 +6676,6 @@ var PackageSymbols = map[string][]Symbol{
{"NewUnion", Func, 18, "func(terms []*Term) *Union"},
{"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
{"Nil", Type, 5, ""},
- {"Object", Type, 5, ""},
{"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"},
{"Package", Type, 5, ""},
{"PackageVar", Const, 25, ""},
@@ -6516,6 +6746,33 @@ var PackageSymbols = map[string][]Symbol{
{"Lang", Func, 22, "func(x string) string"},
},
"hash": {
+ {"(Cloner).BlockSize", Method, 25, ""},
+ {"(Cloner).Clone", Method, 25, ""},
+ {"(Cloner).Reset", Method, 25, ""},
+ {"(Cloner).Size", Method, 25, ""},
+ {"(Cloner).Sum", Method, 25, ""},
+ {"(Cloner).Write", Method, 25, ""},
+ {"(Hash).BlockSize", Method, 0, ""},
+ {"(Hash).Reset", Method, 0, ""},
+ {"(Hash).Size", Method, 0, ""},
+ {"(Hash).Sum", Method, 0, ""},
+ {"(Hash).Write", Method, 0, ""},
+ {"(Hash32).BlockSize", Method, 0, ""},
+ {"(Hash32).Reset", Method, 0, ""},
+ {"(Hash32).Size", Method, 0, ""},
+ {"(Hash32).Sum", Method, 0, ""},
+ {"(Hash32).Sum32", Method, 0, ""},
+ {"(Hash32).Write", Method, 0, ""},
+ {"(Hash64).BlockSize", Method, 0, ""},
+ {"(Hash64).Reset", Method, 0, ""},
+ {"(Hash64).Size", Method, 0, ""},
+ {"(Hash64).Sum", Method, 0, ""},
+ {"(Hash64).Sum64", Method, 0, ""},
+ {"(Hash64).Write", Method, 0, ""},
+ {"(XOF).BlockSize", Method, 25, ""},
+ {"(XOF).Read", Method, 25, ""},
+ {"(XOF).Reset", Method, 25, ""},
+ {"(XOF).Write", Method, 25, ""},
{"Cloner", Type, 25, ""},
{"Hash", Type, 0, ""},
{"Hash32", Type, 0, ""},
@@ -6781,6 +7038,13 @@ var PackageSymbols = map[string][]Symbol{
{"(*YCbCr).SubImage", Method, 0, ""},
{"(*YCbCr).YCbCrAt", Method, 4, ""},
{"(*YCbCr).YOffset", Method, 0, ""},
+ {"(Image).At", Method, 0, ""},
+ {"(Image).Bounds", Method, 0, ""},
+ {"(Image).ColorModel", Method, 0, ""},
+ {"(PalettedImage).At", Method, 0, ""},
+ {"(PalettedImage).Bounds", Method, 0, ""},
+ {"(PalettedImage).ColorIndexAt", Method, 0, ""},
+ {"(PalettedImage).ColorModel", Method, 0, ""},
{"(Point).Add", Method, 0, ""},
{"(Point).Div", Method, 0, ""},
{"(Point).Eq", Method, 0, ""},
@@ -6789,6 +7053,10 @@ var PackageSymbols = map[string][]Symbol{
{"(Point).Mul", Method, 0, ""},
{"(Point).String", Method, 0, ""},
{"(Point).Sub", Method, 0, ""},
+ {"(RGBA64Image).At", Method, 17, ""},
+ {"(RGBA64Image).Bounds", Method, 17, ""},
+ {"(RGBA64Image).ColorModel", Method, 17, ""},
+ {"(RGBA64Image).RGBA64At", Method, 17, ""},
{"(Rectangle).Add", Method, 0, ""},
{"(Rectangle).At", Method, 5, ""},
{"(Rectangle).Bounds", Method, 5, ""},
@@ -6913,8 +7181,10 @@ var PackageSymbols = map[string][]Symbol{
{"(Alpha).RGBA", Method, 0, ""},
{"(Alpha16).RGBA", Method, 0, ""},
{"(CMYK).RGBA", Method, 5, ""},
+ {"(Color).RGBA", Method, 0, ""},
{"(Gray).RGBA", Method, 0, ""},
{"(Gray16).RGBA", Method, 0, ""},
+ {"(Model).Convert", Method, 0, ""},
{"(NRGBA).RGBA", Method, 0, ""},
{"(NRGBA64).RGBA", Method, 0, ""},
{"(NYCbCrA).RGBA", Method, 6, ""},
@@ -6992,7 +7262,19 @@ var PackageSymbols = map[string][]Symbol{
{"WebSafe", Var, 2, ""},
},
"image/draw": {
+ {"(Drawer).Draw", Method, 2, ""},
+ {"(Image).At", Method, 0, ""},
+ {"(Image).Bounds", Method, 0, ""},
+ {"(Image).ColorModel", Method, 0, ""},
+ {"(Image).Set", Method, 0, ""},
{"(Op).Draw", Method, 2, ""},
+ {"(Quantizer).Quantize", Method, 2, ""},
+ {"(RGBA64Image).At", Method, 17, ""},
+ {"(RGBA64Image).Bounds", Method, 17, ""},
+ {"(RGBA64Image).ColorModel", Method, 17, ""},
+ {"(RGBA64Image).RGBA64At", Method, 17, ""},
+ {"(RGBA64Image).Set", Method, 17, ""},
+ {"(RGBA64Image).SetRGBA64", Method, 17, ""},
{"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"},
{"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"},
{"Drawer", Type, 2, ""},
@@ -7027,6 +7309,8 @@ var PackageSymbols = map[string][]Symbol{
},
"image/jpeg": {
{"(FormatError).Error", Method, 0, ""},
+ {"(Reader).Read", Method, 0, ""},
+ {"(Reader).ReadByte", Method, 0, ""},
{"(UnsupportedError).Error", Method, 0, ""},
{"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
{"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
@@ -7040,6 +7324,8 @@ var PackageSymbols = map[string][]Symbol{
},
"image/png": {
{"(*Encoder).Encode", Method, 4, ""},
+ {"(EncoderBufferPool).Get", Method, 9, ""},
+ {"(EncoderBufferPool).Put", Method, 9, ""},
{"(FormatError).Error", Method, 0, ""},
{"(UnsupportedError).Error", Method, 0, ""},
{"BestCompression", Const, 4, ""},
@@ -7083,6 +7369,41 @@ var PackageSymbols = map[string][]Symbol{
{"(*SectionReader).ReadAt", Method, 0, ""},
{"(*SectionReader).Seek", Method, 0, ""},
{"(*SectionReader).Size", Method, 0, ""},
+ {"(ByteReader).ReadByte", Method, 0, ""},
+ {"(ByteScanner).ReadByte", Method, 0, ""},
+ {"(ByteScanner).UnreadByte", Method, 0, ""},
+ {"(ByteWriter).WriteByte", Method, 1, ""},
+ {"(Closer).Close", Method, 0, ""},
+ {"(ReadCloser).Close", Method, 0, ""},
+ {"(ReadCloser).Read", Method, 0, ""},
+ {"(ReadSeekCloser).Close", Method, 16, ""},
+ {"(ReadSeekCloser).Read", Method, 16, ""},
+ {"(ReadSeekCloser).Seek", Method, 16, ""},
+ {"(ReadSeeker).Read", Method, 0, ""},
+ {"(ReadSeeker).Seek", Method, 0, ""},
+ {"(ReadWriteCloser).Close", Method, 0, ""},
+ {"(ReadWriteCloser).Read", Method, 0, ""},
+ {"(ReadWriteCloser).Write", Method, 0, ""},
+ {"(ReadWriteSeeker).Read", Method, 0, ""},
+ {"(ReadWriteSeeker).Seek", Method, 0, ""},
+ {"(ReadWriteSeeker).Write", Method, 0, ""},
+ {"(ReadWriter).Read", Method, 0, ""},
+ {"(ReadWriter).Write", Method, 0, ""},
+ {"(Reader).Read", Method, 0, ""},
+ {"(ReaderAt).ReadAt", Method, 0, ""},
+ {"(ReaderFrom).ReadFrom", Method, 0, ""},
+ {"(RuneReader).ReadRune", Method, 0, ""},
+ {"(RuneScanner).ReadRune", Method, 0, ""},
+ {"(RuneScanner).UnreadRune", Method, 0, ""},
+ {"(Seeker).Seek", Method, 0, ""},
+ {"(StringWriter).WriteString", Method, 12, ""},
+ {"(WriteCloser).Close", Method, 0, ""},
+ {"(WriteCloser).Write", Method, 0, ""},
+ {"(WriteSeeker).Seek", Method, 0, ""},
+ {"(WriteSeeker).Write", Method, 0, ""},
+ {"(Writer).Write", Method, 0, ""},
+ {"(WriterAt).WriteAt", Method, 0, ""},
+ {"(WriterTo).WriteTo", Method, 0, ""},
{"ByteReader", Type, 0, ""},
{"ByteScanner", Type, 0, ""},
{"ByteWriter", Type, 1, ""},
@@ -7142,11 +7463,42 @@ var PackageSymbols = map[string][]Symbol{
{"(*PathError).Error", Method, 16, ""},
{"(*PathError).Timeout", Method, 16, ""},
{"(*PathError).Unwrap", Method, 16, ""},
+ {"(DirEntry).Info", Method, 16, ""},
+ {"(DirEntry).IsDir", Method, 16, ""},
+ {"(DirEntry).Name", Method, 16, ""},
+ {"(DirEntry).Type", Method, 16, ""},
+ {"(FS).Open", Method, 16, ""},
+ {"(File).Close", Method, 16, ""},
+ {"(File).Read", Method, 16, ""},
+ {"(File).Stat", Method, 16, ""},
+ {"(FileInfo).IsDir", Method, 16, ""},
+ {"(FileInfo).ModTime", Method, 16, ""},
+ {"(FileInfo).Mode", Method, 16, ""},
+ {"(FileInfo).Name", Method, 16, ""},
+ {"(FileInfo).Size", Method, 16, ""},
+ {"(FileInfo).Sys", Method, 16, ""},
{"(FileMode).IsDir", Method, 16, ""},
{"(FileMode).IsRegular", Method, 16, ""},
{"(FileMode).Perm", Method, 16, ""},
{"(FileMode).String", Method, 16, ""},
{"(FileMode).Type", Method, 16, ""},
+ {"(GlobFS).Glob", Method, 16, ""},
+ {"(GlobFS).Open", Method, 16, ""},
+ {"(ReadDirFS).Open", Method, 16, ""},
+ {"(ReadDirFS).ReadDir", Method, 16, ""},
+ {"(ReadDirFile).Close", Method, 16, ""},
+ {"(ReadDirFile).Read", Method, 16, ""},
+ {"(ReadDirFile).ReadDir", Method, 16, ""},
+ {"(ReadDirFile).Stat", Method, 16, ""},
+ {"(ReadFileFS).Open", Method, 16, ""},
+ {"(ReadFileFS).ReadFile", Method, 16, ""},
+ {"(ReadLinkFS).Lstat", Method, 25, ""},
+ {"(ReadLinkFS).Open", Method, 25, ""},
+ {"(ReadLinkFS).ReadLink", Method, 25, ""},
+ {"(StatFS).Open", Method, 16, ""},
+ {"(StatFS).Stat", Method, 16, ""},
+ {"(SubFS).Open", Method, 16, ""},
+ {"(SubFS).Sub", Method, 16, ""},
{"DirEntry", Type, 16, ""},
{"ErrClosed", Var, 16, ""},
{"ErrExist", Var, 16, ""},
@@ -7299,12 +7651,18 @@ var PackageSymbols = map[string][]Symbol{
{"(*TextHandler).WithGroup", Method, 21, ""},
{"(Attr).Equal", Method, 21, ""},
{"(Attr).String", Method, 21, ""},
+ {"(Handler).Enabled", Method, 21, ""},
+ {"(Handler).Handle", Method, 21, ""},
+ {"(Handler).WithAttrs", Method, 21, ""},
+ {"(Handler).WithGroup", Method, 21, ""},
{"(Kind).String", Method, 21, ""},
{"(Level).AppendText", Method, 24, ""},
{"(Level).Level", Method, 21, ""},
{"(Level).MarshalJSON", Method, 21, ""},
{"(Level).MarshalText", Method, 21, ""},
{"(Level).String", Method, 21, ""},
+ {"(Leveler).Level", Method, 21, ""},
+ {"(LogValuer).LogValue", Method, 21, ""},
{"(Record).Attrs", Method, 21, ""},
{"(Record).Clone", Method, 21, ""},
{"(Record).NumAttrs", Method, 21, ""},
@@ -7833,6 +8191,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*Rand).Uint32", Method, 0, ""},
{"(*Rand).Uint64", Method, 8, ""},
{"(*Zipf).Uint64", Method, 0, ""},
+ {"(Source).Int63", Method, 0, ""},
+ {"(Source).Seed", Method, 0, ""},
+ {"(Source64).Int63", Method, 8, ""},
+ {"(Source64).Seed", Method, 8, ""},
+ {"(Source64).Uint64", Method, 8, ""},
{"ExpFloat64", Func, 0, "func() float64"},
{"Float32", Func, 0, "func() float32"},
{"Float64", Func, 0, "func() float64"},
@@ -7888,6 +8251,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Rand).Uint64N", Method, 22, ""},
{"(*Rand).UintN", Method, 22, ""},
{"(*Zipf).Uint64", Method, 22, ""},
+ {"(Source).Uint64", Method, 22, ""},
{"ChaCha8", Type, 22, ""},
{"ExpFloat64", Func, 22, "func() float64"},
{"Float32", Func, 22, "func() float32"},
@@ -7951,6 +8315,10 @@ var PackageSymbols = map[string][]Symbol{
{"(*Writer).FormDataContentType", Method, 0, ""},
{"(*Writer).SetBoundary", Method, 1, ""},
{"(*Writer).WriteField", Method, 0, ""},
+ {"(File).Close", Method, 0, ""},
+ {"(File).Read", Method, 0, ""},
+ {"(File).ReadAt", Method, 0, ""},
+ {"(File).Seek", Method, 0, ""},
{"ErrMessageTooLarge", Var, 9, ""},
{"File", Type, 0, ""},
{"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"},
@@ -8135,6 +8503,19 @@ var PackageSymbols = map[string][]Symbol{
{"(*UnixListener).SetDeadline", Method, 0, ""},
{"(*UnixListener).SetUnlinkOnClose", Method, 8, ""},
{"(*UnixListener).SyscallConn", Method, 10, ""},
+ {"(Addr).Network", Method, 0, ""},
+ {"(Addr).String", Method, 0, ""},
+ {"(Conn).Close", Method, 0, ""},
+ {"(Conn).LocalAddr", Method, 0, ""},
+ {"(Conn).Read", Method, 0, ""},
+ {"(Conn).RemoteAddr", Method, 0, ""},
+ {"(Conn).SetDeadline", Method, 0, ""},
+ {"(Conn).SetReadDeadline", Method, 0, ""},
+ {"(Conn).SetWriteDeadline", Method, 0, ""},
+ {"(Conn).Write", Method, 0, ""},
+ {"(Error).Error", Method, 0, ""},
+ {"(Error).Temporary", Method, 0, ""},
+ {"(Error).Timeout", Method, 0, ""},
{"(Flags).String", Method, 0, ""},
{"(HardwareAddr).String", Method, 0, ""},
{"(IP).AppendText", Method, 24, ""},
@@ -8158,6 +8539,16 @@ var PackageSymbols = map[string][]Symbol{
{"(InvalidAddrError).Error", Method, 0, ""},
{"(InvalidAddrError).Temporary", Method, 0, ""},
{"(InvalidAddrError).Timeout", Method, 0, ""},
+ {"(Listener).Accept", Method, 0, ""},
+ {"(Listener).Addr", Method, 0, ""},
+ {"(Listener).Close", Method, 0, ""},
+ {"(PacketConn).Close", Method, 0, ""},
+ {"(PacketConn).LocalAddr", Method, 0, ""},
+ {"(PacketConn).ReadFrom", Method, 0, ""},
+ {"(PacketConn).SetDeadline", Method, 0, ""},
+ {"(PacketConn).SetReadDeadline", Method, 0, ""},
+ {"(PacketConn).SetWriteDeadline", Method, 0, ""},
+ {"(PacketConn).WriteTo", Method, 0, ""},
{"(UnknownNetworkError).Error", Method, 0, ""},
{"(UnknownNetworkError).Temporary", Method, 0, ""},
{"(UnknownNetworkError).Timeout", Method, 0, ""},
@@ -8333,6 +8724,14 @@ var PackageSymbols = map[string][]Symbol{
{"(*Client).Head", Method, 0, ""},
{"(*Client).Post", Method, 0, ""},
{"(*Client).PostForm", Method, 0, ""},
+ {"(*ClientConn).Available", Method, 26, ""},
+ {"(*ClientConn).Close", Method, 26, ""},
+ {"(*ClientConn).Err", Method, 26, ""},
+ {"(*ClientConn).InFlight", Method, 26, ""},
+ {"(*ClientConn).Release", Method, 26, ""},
+ {"(*ClientConn).Reserve", Method, 26, ""},
+ {"(*ClientConn).RoundTrip", Method, 26, ""},
+ {"(*ClientConn).SetStateHook", Method, 26, ""},
{"(*Cookie).String", Method, 0, ""},
{"(*Cookie).Valid", Method, 18, ""},
{"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""},
@@ -8392,10 +8791,22 @@ var PackageSymbols = map[string][]Symbol{
{"(*Transport).CancelRequest", Method, 1, ""},
{"(*Transport).Clone", Method, 13, ""},
{"(*Transport).CloseIdleConnections", Method, 0, ""},
+ {"(*Transport).NewClientConn", Method, 26, ""},
{"(*Transport).RegisterProtocol", Method, 0, ""},
{"(*Transport).RoundTrip", Method, 0, ""},
+ {"(CloseNotifier).CloseNotify", Method, 1, ""},
{"(ConnState).String", Method, 3, ""},
+ {"(CookieJar).Cookies", Method, 0, ""},
+ {"(CookieJar).SetCookies", Method, 0, ""},
{"(Dir).Open", Method, 0, ""},
+ {"(File).Close", Method, 0, ""},
+ {"(File).Read", Method, 0, ""},
+ {"(File).Readdir", Method, 0, ""},
+ {"(File).Seek", Method, 0, ""},
+ {"(File).Stat", Method, 0, ""},
+ {"(FileSystem).Open", Method, 0, ""},
+ {"(Flusher).Flush", Method, 0, ""},
+ {"(Handler).ServeHTTP", Method, 0, ""},
{"(HandlerFunc).ServeHTTP", Method, 0, ""},
{"(Header).Add", Method, 0, ""},
{"(Header).Clone", Method, 13, ""},
@@ -8405,10 +8816,16 @@ var PackageSymbols = map[string][]Symbol{
{"(Header).Values", Method, 14, ""},
{"(Header).Write", Method, 0, ""},
{"(Header).WriteSubset", Method, 0, ""},
+ {"(Hijacker).Hijack", Method, 0, ""},
{"(Protocols).HTTP1", Method, 24, ""},
{"(Protocols).HTTP2", Method, 24, ""},
{"(Protocols).String", Method, 24, ""},
{"(Protocols).UnencryptedHTTP2", Method, 24, ""},
+ {"(Pusher).Push", Method, 8, ""},
+ {"(ResponseWriter).Header", Method, 0, ""},
+ {"(ResponseWriter).Write", Method, 0, ""},
+ {"(ResponseWriter).WriteHeader", Method, 0, ""},
+ {"(RoundTripper).RoundTrip", Method, 0, ""},
{"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"},
{"CanonicalHeaderKey", Func, 0, "func(s string) string"},
{"Client", Type, 0, ""},
@@ -8416,6 +8833,7 @@ var PackageSymbols = map[string][]Symbol{
{"Client.Jar", Field, 0, ""},
{"Client.Timeout", Field, 3, ""},
{"Client.Transport", Field, 0, ""},
+ {"ClientConn", Type, 26, ""},
{"CloseNotifier", Type, 1, ""},
{"ConnState", Type, 3, ""},
{"Cookie", Type, 0, ""},
@@ -8726,6 +9144,8 @@ var PackageSymbols = map[string][]Symbol{
"net/http/cookiejar": {
{"(*Jar).Cookies", Method, 1, ""},
{"(*Jar).SetCookies", Method, 1, ""},
+ {"(PublicSuffixList).PublicSuffix", Method, 1, ""},
+ {"(PublicSuffixList).String", Method, 1, ""},
{"Jar", Type, 1, ""},
{"New", Func, 1, "func(o *Options) (*Jar, error)"},
{"Options", Type, 1, ""},
@@ -8819,6 +9239,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*ServerConn).Pending", Method, 0, ""},
{"(*ServerConn).Read", Method, 0, ""},
{"(*ServerConn).Write", Method, 0, ""},
+ {"(BufferPool).Get", Method, 6, ""},
+ {"(BufferPool).Put", Method, 6, ""},
{"BufferPool", Type, 6, ""},
{"ClientConn", Type, 0, ""},
{"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
@@ -8972,6 +9394,14 @@ var PackageSymbols = map[string][]Symbol{
{"(*Server).ServeConn", Method, 0, ""},
{"(*Server).ServeHTTP", Method, 0, ""},
{"(*Server).ServeRequest", Method, 0, ""},
+ {"(ClientCodec).Close", Method, 0, ""},
+ {"(ClientCodec).ReadResponseBody", Method, 0, ""},
+ {"(ClientCodec).ReadResponseHeader", Method, 0, ""},
+ {"(ClientCodec).WriteRequest", Method, 0, ""},
+ {"(ServerCodec).Close", Method, 0, ""},
+ {"(ServerCodec).ReadRequestBody", Method, 0, ""},
+ {"(ServerCodec).ReadRequestHeader", Method, 0, ""},
+ {"(ServerCodec).WriteResponse", Method, 0, ""},
{"(ServerError).Error", Method, 0, ""},
{"Accept", Func, 0, "func(lis net.Listener)"},
{"Call", Type, 0, ""},
@@ -9030,6 +9460,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*Client).StartTLS", Method, 0, ""},
{"(*Client).TLSConnectionState", Method, 5, ""},
{"(*Client).Verify", Method, 0, ""},
+ {"(Auth).Next", Method, 0, ""},
+ {"(Auth).Start", Method, 0, ""},
{"Auth", Type, 0, ""},
{"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"},
{"Client", Type, 0, ""},
@@ -9241,10 +9673,18 @@ var PackageSymbols = map[string][]Symbol{
{"(*SyscallError).Error", Method, 0, ""},
{"(*SyscallError).Timeout", Method, 10, ""},
{"(*SyscallError).Unwrap", Method, 13, ""},
+ {"(FileInfo).IsDir", Method, 0, ""},
+ {"(FileInfo).ModTime", Method, 0, ""},
+ {"(FileInfo).Mode", Method, 0, ""},
+ {"(FileInfo).Name", Method, 0, ""},
+ {"(FileInfo).Size", Method, 0, ""},
+ {"(FileInfo).Sys", Method, 0, ""},
{"(FileMode).IsDir", Method, 0, ""},
{"(FileMode).IsRegular", Method, 1, ""},
{"(FileMode).Perm", Method, 0, ""},
{"(FileMode).String", Method, 0, ""},
+ {"(Signal).Signal", Method, 0, ""},
+ {"(Signal).String", Method, 0, ""},
{"Args", Var, 0, ""},
{"Chdir", Func, 0, "func(dir string) error"},
{"Chmod", Func, 0, "func(name string, mode FileMode) error"},
@@ -9521,6 +9961,45 @@ var PackageSymbols = map[string][]Symbol{
{"(StructField).IsExported", Method, 17, ""},
{"(StructTag).Get", Method, 0, ""},
{"(StructTag).Lookup", Method, 7, ""},
+ {"(Type).Align", Method, 0, ""},
+ {"(Type).AssignableTo", Method, 0, ""},
+ {"(Type).Bits", Method, 0, ""},
+ {"(Type).CanSeq", Method, 23, ""},
+ {"(Type).CanSeq2", Method, 23, ""},
+ {"(Type).ChanDir", Method, 0, ""},
+ {"(Type).Comparable", Method, 4, ""},
+ {"(Type).ConvertibleTo", Method, 1, ""},
+ {"(Type).Elem", Method, 0, ""},
+ {"(Type).Field", Method, 0, ""},
+ {"(Type).FieldAlign", Method, 0, ""},
+ {"(Type).FieldByIndex", Method, 0, ""},
+ {"(Type).FieldByName", Method, 0, ""},
+ {"(Type).FieldByNameFunc", Method, 0, ""},
+ {"(Type).Fields", Method, 26, ""},
+ {"(Type).Implements", Method, 0, ""},
+ {"(Type).In", Method, 0, ""},
+ {"(Type).Ins", Method, 26, ""},
+ {"(Type).IsVariadic", Method, 0, ""},
+ {"(Type).Key", Method, 0, ""},
+ {"(Type).Kind", Method, 0, ""},
+ {"(Type).Len", Method, 0, ""},
+ {"(Type).Method", Method, 0, ""},
+ {"(Type).MethodByName", Method, 0, ""},
+ {"(Type).Methods", Method, 26, ""},
+ {"(Type).Name", Method, 0, ""},
+ {"(Type).NumField", Method, 0, ""},
+ {"(Type).NumIn", Method, 0, ""},
+ {"(Type).NumMethod", Method, 0, ""},
+ {"(Type).NumOut", Method, 0, ""},
+ {"(Type).Out", Method, 0, ""},
+ {"(Type).Outs", Method, 26, ""},
+ {"(Type).OverflowComplex", Method, 23, ""},
+ {"(Type).OverflowFloat", Method, 23, ""},
+ {"(Type).OverflowInt", Method, 23, ""},
+ {"(Type).OverflowUint", Method, 23, ""},
+ {"(Type).PkgPath", Method, 0, ""},
+ {"(Type).Size", Method, 0, ""},
+ {"(Type).String", Method, 0, ""},
{"(Value).Addr", Method, 0, ""},
{"(Value).Bool", Method, 0, ""},
{"(Value).Bytes", Method, 0, ""},
@@ -9547,6 +10026,7 @@ var PackageSymbols = map[string][]Symbol{
{"(Value).FieldByIndexErr", Method, 18, ""},
{"(Value).FieldByName", Method, 0, ""},
{"(Value).FieldByNameFunc", Method, 0, ""},
+ {"(Value).Fields", Method, 26, ""},
{"(Value).Float", Method, 0, ""},
{"(Value).Grow", Method, 20, ""},
{"(Value).Index", Method, 0, ""},
@@ -9563,6 +10043,7 @@ var PackageSymbols = map[string][]Symbol{
{"(Value).MapRange", Method, 12, ""},
{"(Value).Method", Method, 0, ""},
{"(Value).MethodByName", Method, 0, ""},
+ {"(Value).Methods", Method, 26, ""},
{"(Value).NumField", Method, 0, ""},
{"(Value).NumMethod", Method, 0, ""},
{"(Value).OverflowComplex", Method, 0, ""},
@@ -9678,7 +10159,6 @@ var PackageSymbols = map[string][]Symbol{
{"StructOf", Func, 7, "func(fields []StructField) Type"},
{"StructTag", Type, 0, ""},
{"Swapper", Func, 8, "func(slice any) func(i int, j int)"},
- {"Type", Type, 0, ""},
{"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"},
{"TypeFor", Func, 22, "func[T any]() Type"},
{"TypeOf", Func, 0, "func(i any) Type"},
@@ -9880,6 +10360,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*TypeAssertionError).Error", Method, 0, ""},
{"(*TypeAssertionError).RuntimeError", Method, 0, ""},
{"(Cleanup).Stop", Method, 24, ""},
+ {"(Error).Error", Method, 0, ""},
+ {"(Error).RuntimeError", Method, 0, ""},
{"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"},
{"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"},
{"BlockProfileRecord", Type, 1, ""},
@@ -10154,6 +10636,9 @@ var PackageSymbols = map[string][]Symbol{
{"(IntSlice).Search", Method, 0, ""},
{"(IntSlice).Sort", Method, 0, ""},
{"(IntSlice).Swap", Method, 0, ""},
+ {"(Interface).Len", Method, 0, ""},
+ {"(Interface).Less", Method, 0, ""},
+ {"(Interface).Swap", Method, 0, ""},
{"(StringSlice).Len", Method, 0, ""},
{"(StringSlice).Less", Method, 0, ""},
{"(StringSlice).Search", Method, 0, ""},
@@ -10345,6 +10830,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*WaitGroup).Done", Method, 0, ""},
{"(*WaitGroup).Go", Method, 25, ""},
{"(*WaitGroup).Wait", Method, 0, ""},
+ {"(Locker).Lock", Method, 0, ""},
+ {"(Locker).Unlock", Method, 0, ""},
{"Cond", Type, 0, ""},
{"Cond.L", Field, 0, ""},
{"Locker", Type, 0, ""},
@@ -10486,10 +10973,14 @@ var PackageSymbols = map[string][]Symbol{
{"(*Timeval).Nano", Method, 0, ""},
{"(*Timeval).Nanoseconds", Method, 0, ""},
{"(*Timeval).Unix", Method, 0, ""},
+ {"(Conn).SyscallConn", Method, 9, ""},
{"(Errno).Error", Method, 0, ""},
{"(Errno).Is", Method, 13, ""},
{"(Errno).Temporary", Method, 0, ""},
{"(Errno).Timeout", Method, 0, ""},
+ {"(RawConn).Control", Method, 9, ""},
+ {"(RawConn).Read", Method, 9, ""},
+ {"(RawConn).Write", Method, 9, ""},
{"(Signal).Signal", Method, 0, ""},
{"(Signal).String", Method, 0, ""},
{"(Token).Close", Method, 0, ""},
@@ -14409,7 +14900,7 @@ var PackageSymbols = map[string][]Symbol{
{"RouteMessage.Data", Field, 0, ""},
{"RouteMessage.Header", Field, 0, ""},
{"RouteRIB", Func, 0, ""},
- {"RoutingMessage", Type, 0, ""},
+ {"RoutingMessage", Type, 14, ""},
{"RtAttr", Type, 0, ""},
{"RtAttr.Len", Field, 0, ""},
{"RtAttr.Type", Field, 0, ""},
@@ -15895,7 +16386,6 @@ var PackageSymbols = map[string][]Symbol{
{"SockFprog.Filter", Field, 0, ""},
{"SockFprog.Len", Field, 0, ""},
{"SockFprog.Pad_cgo_0", Field, 0, ""},
- {"Sockaddr", Type, 0, ""},
{"SockaddrDatalink", Type, 0, ""},
{"SockaddrDatalink.Alen", Field, 0, ""},
{"SockaddrDatalink.Data", Field, 0, ""},
@@ -16801,6 +17291,29 @@ var PackageSymbols = map[string][]Symbol{
{"(BenchmarkResult).MemString", Method, 1, ""},
{"(BenchmarkResult).NsPerOp", Method, 0, ""},
{"(BenchmarkResult).String", Method, 0, ""},
+ {"(TB).ArtifactDir", Method, 26, ""},
+ {"(TB).Attr", Method, 25, ""},
+ {"(TB).Chdir", Method, 24, ""},
+ {"(TB).Cleanup", Method, 14, ""},
+ {"(TB).Context", Method, 24, ""},
+ {"(TB).Error", Method, 2, ""},
+ {"(TB).Errorf", Method, 2, ""},
+ {"(TB).Fail", Method, 2, ""},
+ {"(TB).FailNow", Method, 2, ""},
+ {"(TB).Failed", Method, 2, ""},
+ {"(TB).Fatal", Method, 2, ""},
+ {"(TB).Fatalf", Method, 2, ""},
+ {"(TB).Helper", Method, 9, ""},
+ {"(TB).Log", Method, 2, ""},
+ {"(TB).Logf", Method, 2, ""},
+ {"(TB).Name", Method, 8, ""},
+ {"(TB).Output", Method, 25, ""},
+ {"(TB).Setenv", Method, 17, ""},
+ {"(TB).Skip", Method, 2, ""},
+ {"(TB).SkipNow", Method, 2, ""},
+ {"(TB).Skipf", Method, 2, ""},
+ {"(TB).Skipped", Method, 2, ""},
+ {"(TB).TempDir", Method, 15, ""},
{"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"},
{"B", Type, 0, ""},
{"B.N", Field, 0, ""},
@@ -16851,7 +17364,6 @@ var PackageSymbols = map[string][]Symbol{
{"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"},
{"Short", Func, 0, "func() bool"},
{"T", Type, 0, ""},
- {"TB", Type, 2, ""},
{"Testing", Func, 21, "func() bool"},
{"Verbose", Func, 1, "func() bool"},
},
@@ -16887,6 +17399,7 @@ var PackageSymbols = map[string][]Symbol{
"testing/quick": {
{"(*CheckEqualError).Error", Method, 0, ""},
{"(*CheckError).Error", Method, 0, ""},
+ {"(Generator).Generate", Method, 0, ""},
{"(SetupError).Error", Method, 0, ""},
{"Check", Func, 0, "func(f any, config *Config) error"},
{"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"},
@@ -17093,6 +17606,10 @@ var PackageSymbols = map[string][]Symbol{
{"(ListNode).Position", Method, 1, ""},
{"(ListNode).Type", Method, 0, ""},
{"(NilNode).Position", Method, 1, ""},
+ {"(Node).Copy", Method, 0, ""},
+ {"(Node).Position", Method, 1, ""},
+ {"(Node).String", Method, 0, ""},
+ {"(Node).Type", Method, 0, ""},
{"(NodeType).Type", Method, 0, ""},
{"(NumberNode).Position", Method, 1, ""},
{"(NumberNode).Type", Method, 0, ""},
diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
index e223e0f34..59a5de36a 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -39,7 +39,7 @@ const (
Var // "EOF"
Const // "Pi"
Field // "Point.X"
- Method // "(*Buffer).Grow"
+ Method // "(*Buffer).Grow" or "(Reader).Read"
)
func (kind Kind) String() string {
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
index 3db2a135b..7ebe9768b 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -8,7 +8,7 @@ import (
"fmt"
"go/ast"
"go/types"
- _ "unsafe"
+ _ "unsafe" // for go:linkname hack
)
// CallKind describes the function position of an [*ast.CallExpr].
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index fef74a785..51001666e 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -23,7 +23,6 @@ import (
"go/token"
"go/types"
"reflect"
- "unsafe"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/aliases"
@@ -40,8 +39,7 @@ func SetUsesCgo(conf *types.Config) bool {
}
}
- addr := unsafe.Pointer(f.UnsafeAddr())
- *(*bool)(addr) = true
+ *(*bool)(f.Addr().UnsafePointer()) = true
return true
}
diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go
index a5f4e3252..cdd36c388 100644
--- a/vendor/golang.org/x/tools/internal/versions/features.go
+++ b/vendor/golang.org/x/tools/internal/versions/features.go
@@ -9,6 +9,7 @@ package versions
// named constants, to avoid misspelling
const (
+ Go1_17 = "go1.17"
Go1_18 = "go1.18"
Go1_19 = "go1.19"
Go1_20 = "go1.20"
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index c0c2c9a76..b767d3e33 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -35,6 +35,8 @@ import (
"google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials"
+ expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
@@ -98,6 +100,41 @@ var (
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
)
+var (
+ disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.disconnections",
+ Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+ Unit: "{disconnection}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality", "grpc.disconnect_error"},
+ Default: false,
+ })
+ connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.connection_attempts_succeeded",
+ Description: "EXPERIMENTAL. Number of successful connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
+ Default: false,
+ })
+ connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.connection_attempts_failed",
+ Description: "EXPERIMENTAL. Number of failed connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
+ Default: false,
+ })
+ openConnectionsMetric = expstats.RegisterInt64UpDownCount(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.open_connections",
+ Description: "EXPERIMENTAL. Number of open connections.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.security_level", "grpc.lb.locality"},
+ Default: false,
+ })
+)
+
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = math.MaxInt32
@@ -262,9 +299,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}()
// This creates the name resolver, load balancer, etc.
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
- return nil, err
+ if err := cc.exitIdleMode(); err != nil {
+ return nil, fmt.Errorf("failed to exit idle mode: %w", err)
}
+ cc.idlenessMgr.UnsafeSetNotIdle()
// Return now for non-blocking dials.
if !cc.dopts.block {
@@ -332,7 +370,7 @@ func (cc *ClientConn) addTraceEvent(msg string) {
Severity: channelz.CtInfo,
}
}
- channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+ channelz.AddTraceEvent(logger, cc.channelz, 1, ted)
}
type idler ClientConn
@@ -341,14 +379,17 @@ func (i *idler) EnterIdleMode() {
(*ClientConn)(i).enterIdleMode()
}
-func (i *idler) ExitIdleMode() error {
- return (*ClientConn)(i).exitIdleMode()
+func (i *idler) ExitIdleMode() {
+ // Ignore the error returned from this method, because from the perspective
+ // of the caller (idleness manager), the channel would have always moved out
+ // of IDLE by the time this method returns.
+ (*ClientConn)(i).exitIdleMode()
}
// exitIdleMode moves the channel out of idle mode by recreating the name
// resolver and load balancer. This should never be called directly; use
// cc.idlenessMgr.ExitIdleMode instead.
-func (cc *ClientConn) exitIdleMode() (err error) {
+func (cc *ClientConn) exitIdleMode() error {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
@@ -356,11 +397,23 @@ func (cc *ClientConn) exitIdleMode() (err error) {
}
cc.mu.Unlock()
+ // Set state to CONNECTING before building the name resolver
+ // so the channel does not remain in IDLE.
+ cc.csMgr.updateState(connectivity.Connecting)
+
// This needs to be called without cc.mu because this builds a new resolver
// which might update state or report error inline, which would then need to
// acquire cc.mu.
if err := cc.resolverWrapper.start(); err != nil {
- return err
+ // If resolver creation fails, treat it like an error reported by the
+ // resolver before any valid updates. Set channel's state to
+ // TransientFailure, and set an erroring picker with the resolver build
+ // error, which will be returned as part of any subsequent RPCs.
+ logger.Warningf("Failed to start resolver: %v", err)
+ cc.csMgr.updateState(connectivity.TransientFailure)
+ cc.mu.Lock()
+ cc.updateResolverStateAndUnlock(resolver.State{}, err)
+ return fmt.Errorf("failed to start resolver: %w", err)
}
cc.addTraceEvent("exiting idle mode")
@@ -681,10 +734,8 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
- cc.addTraceEvent(err.Error())
- return
- }
+ cc.idlenessMgr.ExitIdleMode()
+
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
// LB policy so that connections can be created.
cc.mu.Lock()
@@ -735,8 +786,8 @@ func init() {
internal.EnterIdleModeForTesting = func(cc *ClientConn) {
cc.idlenessMgr.EnterIdleModeForTesting()
}
- internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
- return cc.idlenessMgr.ExitIdleMode()
+ internal.ExitIdleModeForTesting = func(cc *ClientConn) {
+ cc.idlenessMgr.ExitIdleMode()
}
}
@@ -861,6 +912,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
}
+ ac.updateTelemetryLabelsLocked()
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
// we connect to different addresses.
@@ -977,7 +1029,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
}
ac.addrs = addrs
-
+ ac.updateTelemetryLabelsLocked()
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
@@ -1216,6 +1268,9 @@ type addrConn struct {
resetBackoff chan struct{}
channelz *channelz.SubChannel
+
+ localityLabel string
+ backendServiceLabel string
}
// Note: this requires a lock on ac.mu.
@@ -1223,6 +1278,18 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
+
+ // If we are transitioning out of Ready, it means there is a disconnection.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+ // part of the if condition below once the issue is fixed.
+ if ac.state == connectivity.Ready || (ac.state == connectivity.Connecting && s == connectivity.Idle) {
+ disconnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel, "unknown")
+ openConnectionsMetric.Record(ac.cc.metricsRecorderList, -1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
+ }
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@@ -1280,6 +1347,15 @@ func (ac *addrConn) resetTransportAndUnlock() {
ac.mu.Unlock()
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+ if !errors.Is(err, context.Canceled) {
+ connectionAttemptsFailedMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
+ } else {
+ if logger.V(2) {
+ // This logs canceled connection attempts; the log may later be
+ // replaced by a dedicated metric.
+ logger.Infof("Context cancellation detected; not recording this as a failed connection attempt.")
+ }
+ }
// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
// to ensure one resolution request per pass instead of per subconn failure.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
@@ -1319,10 +1395,50 @@ func (ac *addrConn) resetTransportAndUnlock() {
}
// Success; reset backoff.
ac.mu.Lock()
+ connectionAttemptsSucceededMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
+ openConnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
ac.backoffIdx = 0
ac.mu.Unlock()
}
+// updateTelemetryLabelsLocked calculates and caches the telemetry labels based on the
+// first address in addrConn.
+func (ac *addrConn) updateTelemetryLabelsLocked() {
+ labelsFunc, ok := internal.AddressToTelemetryLabels.(func(resolver.Address) map[string]string)
+ if !ok || len(ac.addrs) == 0 {
+ // Reset defaults
+ ac.localityLabel = ""
+ ac.backendServiceLabel = ""
+ return
+ }
+ labels := labelsFunc(ac.addrs[0])
+ ac.localityLabel = labels["grpc.lb.locality"]
+ ac.backendServiceLabel = labels["grpc.lb.backend_service"]
+}
+
+type securityLevelKey struct{}
+
+func (ac *addrConn) securityLevelLocked() string {
+ var secLevel string
+ // During disconnection, ac.transport is nil. Fall back to the security level
+ // stored in the current address during connection.
+ if ac.transport == nil {
+ secLevel, _ = ac.curAddr.Attributes.Value(securityLevelKey{}).(string)
+ return secLevel
+ }
+ authInfo := ac.transport.Peer().AuthInfo
+ if ci, ok := authInfo.(interface {
+ GetCommonAuthInfo() credentials.CommonAuthInfo
+ }); ok {
+ secLevel = ci.GetCommonAuthInfo().SecurityLevel.String()
+ // Store the security level in the current address' attributes so
+ // that it remains available for disconnection metrics after the
+ // transport is closed.
+ ac.curAddr.Attributes = ac.curAddr.Attributes.WithValue(securityLevelKey{}, secLevel)
+ }
+ return secLevel
+}
+
// tryAllAddrs tries to create a connection to the addresses, and stop when at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
index 2b57ba65a..472813f58 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -76,6 +76,7 @@ const (
MetricTypeFloatHisto
MetricTypeIntGauge
MetricTypeIntUpDownCount
+ MetricTypeIntAsyncGauge
)
// Int64CountHandle is a typed handle for a int count metric. This handle
@@ -172,6 +173,30 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels .
recorder.RecordInt64Gauge(h, incr, labels...)
}
+// AsyncMetric is a marker interface for asynchronous metric types.
+type AsyncMetric interface {
+ isAsync()
+ Descriptor() *MetricDescriptor
+}
+
+// Int64AsyncGaugeHandle is a typed handle for an int64 async gauge metric. This
+// handle is passed at the recording point in order to know which metric to record on.
+type Int64AsyncGaugeHandle MetricDescriptor
+
+// isAsync implements the AsyncMetric interface.
+func (h *Int64AsyncGaugeHandle) isAsync() {}
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64AsyncGaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64AsyncGaugeHandle) Record(recorder AsyncMetricsRecorder, value int64, labels ...string) {
+ recorder.RecordInt64AsyncGauge(h, value, labels...)
+}
+
// registeredMetrics are the registered metric descriptor names.
var registeredMetrics = make(map[string]bool)
@@ -282,6 +307,20 @@ func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHand
return (*Int64UpDownCountHandle)(descPtr)
}
+// RegisterInt64AsyncGauge registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64AsyncGauge(descriptor MetricDescriptor) *Int64AsyncGaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntAsyncGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64AsyncGaugeHandle)(descPtr)
+}
+
// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
// registry. Returns a cleanup function that sets the metrics registry to its
// original state.
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
index cb57f1a74..d7d404cbe 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -43,6 +43,13 @@ type MetricsRecorder interface {
RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
}
+// AsyncMetricsRecorder records on asynchronous metrics derived from metric registry.
+type AsyncMetricsRecorder interface {
+ // RecordInt64AsyncGauge records the measurement alongside labels on the
+ // int64 async gauge associated with the provided handle asynchronously.
+ RecordInt64AsyncGauge(handle *Int64AsyncGaugeHandle, incr int64, labels ...string)
+}
+
// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
// Metrics will be deleted in a future release.
type Metrics = stats.MetricSet
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index ba25b8988..f38de74a4 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -67,6 +67,10 @@ type Balancer struct {
// balancerCurrent before the UpdateSubConnState is called on the
// balancerCurrent.
currentMu sync.Mutex
+
+ // activeGoroutines tracks all the goroutines that this balancer has started
+ // and that should be waited on when the balancer closes.
+ activeGoroutines sync.WaitGroup
}
// swap swaps out the current lb with the pending lb and updates the ClientConn.
@@ -76,7 +80,9 @@ func (gsb *Balancer) swap() {
cur := gsb.balancerCurrent
gsb.balancerCurrent = gsb.balancerPending
gsb.balancerPending = nil
+ gsb.activeGoroutines.Add(1)
go func() {
+ defer gsb.activeGoroutines.Done()
gsb.currentMu.Lock()
defer gsb.currentMu.Unlock()
cur.Close()
@@ -274,6 +280,7 @@ func (gsb *Balancer) Close() {
currentBalancerToClose.Close()
pendingBalancerToClose.Close()
+ gsb.activeGoroutines.Wait()
}
// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
@@ -324,7 +331,12 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) {
defer bw.gsb.mu.Unlock()
bw.lastState = state
+ // If Close() acquires the mutex before UpdateState(), the balancer
+ // will already have been removed from the current or pending state when
+ // reaching this point.
if !bw.gsb.balancerCurrentOrPending(bw) {
+ // Returning here ensures that (*Balancer).swap() is not invoked after
+ // (*Balancer).Close() and therefore prevents "use after close".
return
}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 91f760936..6414ee4bb 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -77,6 +77,11 @@ var (
// - Target resolution is disabled.
// - The DNS resolver is being used.
EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true)
+
+ // XDSAuthorityRewrite indicates whether xDS authority rewriting is enabled.
+ // This feature is defined in gRFC A81 and is enabled by setting the
+ // environment variable GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE to "true".
+ XDSAuthorityRewrite = boolFromEnv("GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7617be215..c90cc51bd 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -25,4 +25,8 @@ var (
// BufferPool is implemented by the grpc package and returns a server
// option to configure a shared buffer pool for a grpc.Server.
BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+
+ // AcceptCompressors is implemented by the grpc package and returns
+ // a call option that restricts the grpc-accept-encoding header for a call.
+ AcceptCompressors any // func(...string) grpc.CallOption
)
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index 2c13ee9da..d3cd24f80 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -21,7 +21,6 @@
package idle
import (
- "fmt"
"math"
"sync"
"sync/atomic"
@@ -33,15 +32,15 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
return time.AfterFunc(d, f)
}
-// Enforcer is the functionality provided by grpc.ClientConn to enter
-// and exit from idle mode.
-type Enforcer interface {
- ExitIdleMode() error
+// ClientConn is the functionality provided by grpc.ClientConn to enter and exit
+// from idle mode.
+type ClientConn interface {
+ ExitIdleMode()
EnterIdleMode()
}
-// Manager implements idleness detection and calls the configured Enforcer to
-// enter/exit idle mode when appropriate. Must be created by NewManager.
+// Manager implements idleness detection and calls the ClientConn to enter/exit
+// idle mode when appropriate. Must be created by NewManager.
type Manager struct {
// State accessed atomically.
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
@@ -51,8 +50,8 @@ type Manager struct {
// Can be accessed without atomics or mutex since these are set at creation
// time and read-only after that.
- enforcer Enforcer // Functionality provided by grpc.ClientConn.
- timeout time.Duration
+ cc ClientConn // Functionality provided by grpc.ClientConn.
+ timeout time.Duration
// idleMu is used to guarantee mutual exclusion in two scenarios:
// - Opposing intentions:
@@ -72,9 +71,9 @@ type Manager struct {
// NewManager creates a new idleness manager implementation for the
// given idle timeout. It begins in idle mode.
-func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+func NewManager(cc ClientConn, timeout time.Duration) *Manager {
return &Manager{
- enforcer: enforcer,
+ cc: cc,
timeout: timeout,
actuallyIdle: true,
activeCallsCount: -math.MaxInt32,
@@ -127,7 +126,7 @@ func (m *Manager) handleIdleTimeout() {
// Now that we've checked that there has been no activity, attempt to enter
// idle mode, which is very likely to succeed.
- if m.tryEnterIdleMode() {
+ if m.tryEnterIdleMode(true) {
// Successfully entered idle mode. No timer needed until we exit idle.
return
}
@@ -142,10 +141,13 @@ func (m *Manager) handleIdleTimeout() {
// that, it performs a last minute check to ensure that no new RPC has come in,
// making the channel active.
//
+// checkActivity controls if a check for RPC activity, since the last time the
+// idle_timeout fired, is made.
+//
// Return value indicates whether or not the channel moved to idle mode.
//
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (m *Manager) tryEnterIdleMode() bool {
+func (m *Manager) tryEnterIdleMode(checkActivity bool) bool {
// Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
// that the channel is either in idle mode or is trying to get there.
if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
@@ -166,7 +168,7 @@ func (m *Manager) tryEnterIdleMode() bool {
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
- if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+ if checkActivity && atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
// A very short RPC could have come in (and also finished) after we
// checked for calls count and activity in handleIdleTimeout(), but
// before the CAS operation. So, we need to check for activity again.
@@ -177,44 +179,37 @@ func (m *Manager) tryEnterIdleMode() bool {
// No new RPCs have come in since we set the active calls count value to
// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
// unconditionally now.
- m.enforcer.EnterIdleMode()
+ m.cc.EnterIdleMode()
m.actuallyIdle = true
return true
}
// EnterIdleModeForTesting instructs the channel to enter idle mode.
func (m *Manager) EnterIdleModeForTesting() {
- m.tryEnterIdleMode()
+ m.tryEnterIdleMode(false)
}
// OnCallBegin is invoked at the start of every RPC.
-func (m *Manager) OnCallBegin() error {
+func (m *Manager) OnCallBegin() {
if m.isClosed() {
- return nil
+ return
}
if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
// Channel is not idle now. Set the activity bit and allow the call.
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
- return nil
+ return
}
// Channel is either in idle mode or is in the process of moving to idle
// mode. Attempt to exit idle mode to allow this RPC.
- if err := m.ExitIdleMode(); err != nil {
- // Undo the increment to calls count, and return an error causing the
- // RPC to fail.
- atomic.AddInt32(&m.activeCallsCount, -1)
- return err
- }
-
+ m.ExitIdleMode()
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
- return nil
}
-// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// ExitIdleMode instructs m to call the ClientConn's ExitIdleMode and update its
// internal state.
-func (m *Manager) ExitIdleMode() error {
+func (m *Manager) ExitIdleMode() {
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
m.idleMu.Lock()
defer m.idleMu.Unlock()
@@ -231,12 +226,10 @@ func (m *Manager) ExitIdleMode() error {
// m.ExitIdleMode.
//
// In any case, there is nothing to do here.
- return nil
+ return
}
- if err := m.enforcer.ExitIdleMode(); err != nil {
- return fmt.Errorf("failed to exit idle mode: %w", err)
- }
+ m.cc.ExitIdleMode()
// Undo the idle entry process. This also respects any new RPC attempts.
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
@@ -244,7 +237,23 @@ func (m *Manager) ExitIdleMode() error {
// Start a new timer to fire after the configured idle timeout.
m.resetIdleTimerLocked(m.timeout)
- return nil
+}
+
+// UnsafeSetNotIdle instructs the Manager to update its internal state to
+// reflect the reality that the channel is no longer in IDLE mode.
+//
+// N.B. This method is intended only for internal use by the gRPC client
+// when it exits IDLE mode **manually** from `Dial`. The callsite must ensure:
+// - The channel was **actually in IDLE mode** immediately prior to the call.
+// - There is **no concurrent activity** that could cause the channel to exit
+// IDLE mode *naturally* at the same time.
+func (m *Manager) UnsafeSetNotIdle() {
+ m.idleMu.Lock()
+ defer m.idleMu.Unlock()
+
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+ m.actuallyIdle = false
+ m.resetIdleTimerLocked(m.timeout)
}
// OnCallEnd is invoked at the end of every RPC.
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 2699223a2..27bef83d9 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -244,6 +244,10 @@ var (
// When set, the function will be called before the stream enters
// the blocking state.
NewStreamWaitingForResolver = func() {}
+
+ // AddressToTelemetryLabels is an xDS-provided function to extract telemetry
+ // labels from a resolver.Address. Callers must assert its type before calling.
+ AddressToTelemetryLabels any // func(addr resolver.Address) map[string]string
)
// HealthChecker defines the signature of the client-side LB channel health
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 65b4ab243..38ca031af 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -370,7 +370,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
})
t.logger = prefixLoggerForClientTransport(t)
// Add peer information to the http2client context.
- t.ctx = peer.NewContext(t.ctx, t.getPeer())
+ t.ctx = peer.NewContext(t.ctx, t.Peer())
if md, ok := addr.Metadata.(*metadata.MD); ok {
t.md = *md
@@ -510,7 +510,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
return s
}
-func (t *http2Client) getPeer() *peer.Peer {
+func (t *http2Client) Peer() *peer.Peer {
return &peer.Peer{
Addr: t.remoteAddr,
AuthInfo: t.authInfo, // Can be nil
@@ -551,6 +551,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
hfLen += len(authData) + len(callAuthData)
registeredCompressors := t.registeredCompressors
+ if callHdr.AcceptedCompressors != nil {
+ registeredCompressors = *callHdr.AcceptedCompressors
+ }
if callHdr.PreviousAttempts > 0 {
hfLen++
}
@@ -742,7 +745,7 @@ func (e NewStreamError) Error() string {
// NewStream creates a stream and registers it into the transport as "active"
// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
- ctx = peer.NewContext(ctx, t.getPeer())
+ ctx = peer.NewContext(ctx, t.Peer())
// ServerName field of the resolver returned address takes precedence over
// Host field of CallHdr to determine the :authority header. This is because,
@@ -1485,7 +1488,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
case "grpc-status":
code, err := strconv.ParseInt(hf.Value, 10, 32)
if err != nil {
- se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+ se := status.New(codes.Unknown, fmt.Sprintf("transport: malformed grpc-status: %v", err))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
@@ -1807,8 +1810,6 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
}
}
-func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
-
func (t *http2Client) incrMsgSent() {
if channelz.IsOn() {
t.channelz.SocketMetrics.MessagesSent.Add(1)
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 6209eb23c..5bbb641ad 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -411,12 +411,6 @@ var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer {
- if memPool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream
- // is always initialized with a BufferPool.
- memPool = mem.DefaultBufferPool()
- }
-
if writeBufferSize < 0 {
writeBufferSize = 0
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 5ff83a7d7..6daf1e002 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -553,6 +553,12 @@ type CallHdr struct {
// outbound message.
SendCompress string
+ // AcceptedCompressors overrides the grpc-accept-encoding header for this
+ // call. When nil, the transport advertises the default set of registered
+ // compressors. A non-nil pointer overrides that value (including the empty
+ // string to advertise none).
+ AcceptedCompressors *string
+
// Creds specifies credentials.PerRPCCredentials for a call.
Creds credentials.PerRPCCredentials
@@ -608,8 +614,9 @@ type ClientTransport interface {
// with a human readable string with debug info.
GetGoAwayReason() (GoAwayReason, string)
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
+ // Peer returns information about the peer associated with the Transport.
+ // The returned information includes authentication and network address details.
+ Peer() *peer.Peer
}
// ServerTransport is the common interface for all gRPC server-side transport
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
index f211e7274..e37afdd19 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_pool.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -38,9 +38,11 @@ type BufferPool interface {
Put(*[]byte)
}
+const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2.
+
var defaultBufferPoolSizes = []int{
256,
- 4 << 10, // 4KB (go page size)
+ goPageSize,
16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
32 << 10, // 32KB (default buffer size for io.Copy)
1 << 20, // 1MB
@@ -172,7 +174,14 @@ func (p *simpleBufferPool) Get(size int) *[]byte {
p.pool.Put(bs)
}
- b := make([]byte, size)
+ // If we're going to allocate, round up to the nearest page. This way if
+ // requests frequently arrive with small variation we don't allocate
+ // repeatedly if we get unlucky and they increase over time. By default we
+ // only allocate here if size > 1MiB. Because goPageSize is a power of 2, we
+ // can round up efficiently.
+ allocSize := (size + goPageSize - 1) & ^(goPageSize - 1)
+
+ b := make([]byte, size, allocSize)
return &b
}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index 80e16a327..6e6137643 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -69,6 +69,7 @@ func (ccr *ccResolverWrapper) start() error {
errCh := make(chan error)
ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil {
+ errCh <- ctx.Err()
return
}
opts := resolver.BuildOptions{
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 6b04c9e87..8160f9430 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -33,6 +33,8 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
@@ -41,6 +43,10 @@ import (
"google.golang.org/grpc/status"
)
+func init() {
+ internal.AcceptCompressors = acceptCompressors
+}
+
// Compressor defines the interface gRPC uses to compress a message.
//
// Deprecated: use package encoding.
@@ -151,16 +157,32 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
- compressorName string
- failFast bool
- maxReceiveMessageSize *int
- maxSendMessageSize *int
- creds credentials.PerRPCCredentials
- contentSubtype string
- codec baseCodec
- maxRetryRPCBufferSize int
- onFinish []func(err error)
- authority string
+ compressorName string
+ failFast bool
+ maxReceiveMessageSize *int
+ maxSendMessageSize *int
+ creds credentials.PerRPCCredentials
+ contentSubtype string
+ codec baseCodec
+ maxRetryRPCBufferSize int
+ onFinish []func(err error)
+ authority string
+ acceptedResponseCompressors []string
+}
+
+func acceptedCompressorAllows(allowed []string, name string) bool {
+ if allowed == nil {
+ return true
+ }
+ if name == "" || name == encoding.Identity {
+ return true
+ }
+ for _, a := range allowed {
+ if a == name {
+ return true
+ }
+ }
+ return false
}
func defaultCallInfo() *callInfo {
@@ -170,6 +192,29 @@ func defaultCallInfo() *callInfo {
}
}
+func newAcceptedCompressionConfig(names []string) ([]string, error) {
+ if len(names) == 0 {
+ return nil, nil
+ }
+ var allowed []string
+ seen := make(map[string]struct{}, len(names))
+ for _, name := range names {
+ name = strings.TrimSpace(name)
+ if name == "" || name == encoding.Identity {
+ continue
+ }
+ if !grpcutil.IsCompressorNameRegistered(name) {
+ return nil, status.Errorf(codes.InvalidArgument, "grpc: compressor %q is not registered", name)
+ }
+ if _, dup := seen[name]; dup {
+ continue
+ }
+ seen[name] = struct{}{}
+ allowed = append(allowed, name)
+ }
+ return allowed, nil
+}
+
// CallOption configures a Call before it starts or extracts information from
// a Call after it completes.
type CallOption interface {
@@ -471,6 +516,31 @@ func (o CompressorCallOption) before(c *callInfo) error {
}
func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
+// acceptCompressors returns a CallOption that limits the compression algorithms
+// advertised in the grpc-accept-encoding header for response messages.
+// Compression algorithms not in the provided list will not be advertised, and
+// responses compressed with non-listed algorithms will be rejected.
+func acceptCompressors(names ...string) CallOption {
+ cp := append([]string(nil), names...)
+ return acceptCompressorsCallOption{names: cp}
+}
+
+// acceptCompressorsCallOption is a CallOption that limits response compression.
+type acceptCompressorsCallOption struct {
+ names []string
+}
+
+func (o acceptCompressorsCallOption) before(c *callInfo) error {
+ allowed, err := newAcceptedCompressionConfig(o.names)
+ if err != nil {
+ return err
+ }
+ c.acceptedResponseCompressors = allowed
+ return nil
+}
+
+func (acceptCompressorsCallOption) after(*callInfo, *csAttempt) {}
+
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
// the wire will be "application/grpc+json". The content-subtype is converted
@@ -857,8 +927,7 @@ func (p *payloadInfo) free() {
// the buffer is no longer needed.
// TODO: Refactor this function to reduce the number of arguments.
// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
-func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
-) (out mem.BufferSlice, err error) {
+func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) (out mem.BufferSlice, err error) {
pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return nil, err
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index ca87ff977..ec9577b27 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -25,6 +25,7 @@ import (
"math"
rand "math/rand/v2"
"strconv"
+ "strings"
"sync"
"time"
@@ -179,13 +180,41 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
var emptyMethodConfig = serviceconfig.MethodConfig{}
+// endOfClientStream performs cleanup actions required for both successful and
+// failed streams. This includes incrementing channelz stats and invoking all
+// registered OnFinish call options.
+func endOfClientStream(cc *ClientConn, err error, opts ...CallOption) {
+ if channelz.IsOn() {
+ if err != nil {
+ cc.incrCallsFailed()
+ } else {
+ cc.incrCallsSucceeded()
+ }
+ }
+
+ for _, o := range opts {
+ if o, ok := o.(OnFinishCallOption); ok {
+ o.OnFinish(err)
+ }
+ }
+}
+
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+ if channelz.IsOn() {
+ cc.incrCallsStarted()
+ }
+ defer func() {
+ if err != nil {
+ // Ensure cleanup when stream creation fails.
+ endOfClientStream(cc, err, opts...)
+ }
+ }()
+
// Start tracking the RPC for idleness purposes. This is where a stream is
// created for both streaming and unary RPCs, and hence is a good place to
// track active RPC count.
- if err := cc.idlenessMgr.OnCallBegin(); err != nil {
- return nil, err
- }
+ cc.idlenessMgr.OnCallBegin()
+
// Add a calloption, to decrement the active call count, that gets executed
// when the RPC completes.
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
@@ -204,14 +233,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}
}
- if channelz.IsOn() {
- cc.incrCallsStarted()
- defer func() {
- if err != nil {
- cc.incrCallsFailed()
- }
- }()
- }
// Provide an opportunity for the first RPC to see the first service config
// provided by the resolver.
nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
@@ -301,6 +322,10 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
DoneFunc: doneFunc,
Authority: callInfo.authority,
}
+ if allowed := callInfo.acceptedResponseCompressors; len(allowed) > 0 {
+ headerValue := strings.Join(allowed, ",")
+ callHdr.AcceptedCompressors = &headerValue
+ }
// Set our outgoing compression according to the UseCompressor CallOption, if
// set. In that case, also find the compressor from the encoding package.
@@ -484,7 +509,7 @@ func (a *csAttempt) getTransport() error {
return err
}
if a.trInfo != nil {
- a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
+ a.trInfo.firstLine.SetRemoteAddr(a.transport.Peer().Addr)
}
if pick.blocked && a.statsHandler != nil {
a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
@@ -1042,9 +1067,6 @@ func (cs *clientStream) finish(err error) {
return
}
cs.finished = true
- for _, onFinish := range cs.callInfo.onFinish {
- onFinish(err)
- }
cs.commitAttemptLocked()
if cs.attempt != nil {
cs.attempt.finish(err)
@@ -1084,13 +1106,7 @@ func (cs *clientStream) finish(err error) {
if err == nil {
cs.retryThrottler.successfulRPC()
}
- if channelz.IsOn() {
- if err != nil {
- cs.cc.incrCallsFailed()
- } else {
- cs.cc.incrCallsSucceeded()
- }
- }
+ endOfClientStream(cs.cc, err, cs.opts...)
cs.cancel()
}
@@ -1134,6 +1150,10 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
a.decompressorV0 = nil
a.decompressorV1 = encoding.GetCompressor(ct)
}
+ // Validate that the compression method is acceptable for this call.
+ if !acceptedCompressorAllows(cs.callInfo.acceptedResponseCompressors, ct) {
+ return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
+ }
} else {
// No compression is used; disable our decompressor.
a.decompressorV0 = nil
@@ -1479,6 +1499,10 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
as.decompressorV0 = nil
as.decompressorV1 = encoding.GetCompressor(ct)
}
+ // Validate that the compression method is acceptable for this call.
+ if !acceptedCompressorAllows(as.callInfo.acceptedResponseCompressors, ct) {
+ return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
+ }
} else {
// No compression is used; disable our decompressor.
as.decompressorV0 = nil
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 9e6d018fb..ff7840fd8 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.77.0"
+const Version = "1.78.0"
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 669133d04..c96e44834 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -32,7 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
- f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
+ packed := false
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
@@ -108,7 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
- f.L1.EditionFeatures.IsPacked = true
+ packed = true
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
@@ -121,6 +121,13 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
tag = strings.TrimPrefix(tag[i:], ",")
}
+ // Update EditionFeatures after the loop and after we know whether this is
+ // a proto2 or proto3 field.
+ f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
+ if packed {
+ f.L1.EditionFeatures.IsPacked = true
+ }
+
// The generator uses the group message name instead of the field name.
// We obtain the real field name by lowercasing the group name.
if f.L1.Kind == protoreflect.GroupKind {
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 099b2bf45..9aa7a9bb7 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -424,27 +424,34 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
}
-// parseTypeName parses Any type URL or extension field name. The name is
-// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
-// strings. This implementation is more liberal and allows for the pattern
-// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed
-// in between [ ], '.', '/' and the sub names.
+// parseTypeName parses an Any type URL or an extension field name. The name is
+// enclosed in [ and ] characters. We allow almost arbitrary type URL prefixes,
+// closely following the text-format spec [1,2]. We implement "ExtensionName |
+// AnyName" as follows (with some exceptions for backwards compatibility):
+//
+// char = [-_a-zA-Z0-9]
+// url_char = char | [.~!$&'()*+,;=] | "%", hex, hex
+//
+// Ident = char, { char }
+// TypeName = Ident, { ".", Ident } ;
+// UrlPrefix = url_char, { url_char | "/" } ;
+// ExtensionName = "[", TypeName, "]" ;
+// AnyName = "[", UrlPrefix, "/", TypeName, "]" ;
+//
+// Additionally, we allow arbitrary whitespace and comments between [ and ].
+//
+// [1] https://protobuf.dev/reference/protobuf/textformat-spec/#characters
+// [2] https://protobuf.dev/reference/protobuf/textformat-spec/#field-names
func (d *Decoder) parseTypeName() (Token, error) {
- startPos := len(d.orig) - len(d.in)
// Use alias s to advance first in order to use d.in for error handling.
- // Caller already checks for [ as first character.
+ // Caller already checks for [ as first character (d.in[0] == '[').
s := consume(d.in[1:], 0)
if len(s) == 0 {
return Token{}, ErrUnexpectedEOF
}
+ // Collect everything between [ and ] in name.
var name []byte
- for len(s) > 0 && isTypeNameChar(s[0]) {
- name = append(name, s[0])
- s = s[1:]
- }
- s = consume(s, 0)
-
var closed bool
for len(s) > 0 && !closed {
switch {
@@ -452,23 +459,20 @@ func (d *Decoder) parseTypeName() (Token, error) {
s = s[1:]
closed = true
- case s[0] == '/', s[0] == '.':
- if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
- return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
- d.orig[startPos:len(d.orig)-len(s)+1])
- }
+ case s[0] == '/' || isTypeNameChar(s[0]) || isUrlExtraChar(s[0]):
name = append(name, s[0])
- s = s[1:]
- s = consume(s, 0)
- for len(s) > 0 && isTypeNameChar(s[0]) {
- name = append(name, s[0])
- s = s[1:]
+ s = consume(s[1:], 0)
+
+ // URL percent-encoded chars
+ case s[0] == '%':
+ if len(s) < 3 || !isHexChar(s[1]) || !isHexChar(s[2]) {
+ return Token{}, d.parseTypeNameError(s, 3)
}
- s = consume(s, 0)
+ name = append(name, s[0], s[1], s[2])
+ s = consume(s[3:], 0)
default:
- return Token{}, d.newSyntaxError(
- "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
+ return Token{}, d.parseTypeNameError(s, 1)
}
}
@@ -476,15 +480,38 @@ func (d *Decoder) parseTypeName() (Token, error) {
return Token{}, ErrUnexpectedEOF
}
- // First character cannot be '.'. Last character cannot be '.' or '/'.
- size := len(name)
- if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
- return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
- d.orig[startPos:len(d.orig)-len(s)])
+ // Split collected name on last '/' into urlPrefix and typeName (if '/' is
+ // present).
+ typeName := name
+ if i := bytes.LastIndexByte(name, '/'); i != -1 {
+ urlPrefix := name[:i]
+ typeName = name[i+1:]
+
+ // urlPrefix may be empty (for backwards compatibility).
+ // If non-empty, it must not start with '/'.
+ if len(urlPrefix) > 0 && urlPrefix[0] == '/' {
+ return Token{}, d.parseTypeNameError(s, 0)
+ }
}
+ // typeName must not be empty (note: "" splits to [""]) and all identifier
+ // parts must not be empty.
+ for _, ident := range bytes.Split(typeName, []byte{'.'}) {
+ if len(ident) == 0 {
+ return Token{}, d.parseTypeNameError(s, 0)
+ }
+ }
+
+ // typeName must not contain any percent-encoded or special URL chars.
+ for _, b := range typeName {
+ if b == '%' || (b != '.' && isUrlExtraChar(b)) {
+ return Token{}, d.parseTypeNameError(s, 0)
+ }
+ }
+
+ startPos := len(d.orig) - len(d.in)
+ endPos := len(d.orig) - len(s)
d.in = s
- endPos := len(d.orig) - len(d.in)
d.consume(0)
return Token{
@@ -496,16 +523,32 @@ func (d *Decoder) parseTypeName() (Token, error) {
}, nil
}
+func (d *Decoder) parseTypeNameError(s []byte, numUnconsumedChars int) error {
+ return d.newSyntaxError(
+ "invalid type URL/extension field name: %s",
+ d.in[:len(d.in)-len(s)+min(numUnconsumedChars, len(s))],
+ )
+}
+
+func isHexChar(b byte) bool {
+ return ('0' <= b && b <= '9') ||
+ ('a' <= b && b <= 'f') ||
+ ('A' <= b && b <= 'F')
+}
+
func isTypeNameChar(b byte) bool {
- return (b == '-' || b == '_' ||
+ return b == '-' || b == '_' ||
('0' <= b && b <= '9') ||
('a' <= b && b <= 'z') ||
- ('A' <= b && b <= 'Z'))
+ ('A' <= b && b <= 'Z')
}
-func isWhiteSpace(b byte) bool {
+// isUrlExtraChar complements isTypeNameChar with extra characters that we allow
+// in URLs but not in type names. Note that '/' is not included so that it can
+// be treated specially.
+func isUrlExtraChar(b byte) bool {
switch b {
- case ' ', '\n', '\r', '\t':
+ case '.', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=':
return true
default:
return false
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index dbcf90b87..c775e5832 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -32,6 +32,7 @@ const (
EditionProto3 Edition = 999
Edition2023 Edition = 1000
Edition2024 Edition = 1001
+ EditionUnstable Edition = 9999
EditionUnsupported Edition = 100000
)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index dd31faaeb..78f02b1b4 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -330,7 +330,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
case genid.DescriptorProto_Options_field_number:
- md.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -356,27 +355,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
}
-func (md *Message) unmarshalOptions(b []byte) {
- for len(b) > 0 {
- num, typ, n := protowire.ConsumeTag(b)
- b = b[n:]
- switch typ {
- case protowire.VarintType:
- v, m := protowire.ConsumeVarint(b)
- b = b[m:]
- switch num {
- case genid.MessageOptions_MapEntry_field_number:
- md.L1.IsMapEntry = protowire.DecodeBool(v)
- case genid.MessageOptions_MessageSetWireFormat_field_number:
- md.L1.IsMessageSet = protowire.DecodeBool(v)
- }
- default:
- m := protowire.ConsumeFieldValue(num, typ, b)
- b = b[m:]
- }
- }
-}
-
func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 950a6a325..65aaf4d21 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -26,6 +26,7 @@ const (
Edition_EDITION_PROTO3_enum_value = 999
Edition_EDITION_2023_enum_value = 1000
Edition_EDITION_2024_enum_value = 1001
+ Edition_EDITION_UNSTABLE_enum_value = 9999
Edition_EDITION_1_TEST_ONLY_enum_value = 1
Edition_EDITION_2_TEST_ONLY_enum_value = 2
Edition_EDITION_99997_TEST_ONLY_enum_value = 99997
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 229c69801..4a3bf393e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -113,6 +113,9 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
}
func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if opts.depth--; opts.depth < 0 {
+ return out, errRecursionDepth
+ }
if wtyp != protowire.BytesType {
return out, errUnknown
}
@@ -170,6 +173,9 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
}
func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if opts.depth--; opts.depth < 0 {
+ return out, errRecursionDepth
+ }
if wtyp != protowire.BytesType {
return out, errUnknown
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index e0dd21fa5..1228b5c8c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -102,8 +102,7 @@ var errUnknown = errors.New("unknown")
func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
mi.init()
- opts.depth--
- if opts.depth < 0 {
+ if opts.depth--; opts.depth < 0 {
return out, errRecursionDepth
}
if flags.ProtoLegacy && mi.isMessageSet {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index 7b2995dde..99a1eb95f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -68,9 +68,13 @@ func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out pr
if in.Resolver == nil {
in.Resolver = protoregistry.GlobalTypes
}
+ if in.Depth == 0 {
+ in.Depth = protowire.DefaultRecursionLimit
+ }
o, st := mi.validate(in.Buf, 0, unmarshalOptions{
flags: in.Flags,
resolver: in.Resolver,
+ depth: in.Depth,
})
if o.initialized {
out.Flags |= protoiface.UnmarshalInitialized
@@ -257,6 +261,9 @@ func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmars
states[0].typ = validationTypeGroup
states[0].endGroup = groupTag
}
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
initialized := true
start := len(b)
State:
@@ -451,6 +458,13 @@ State:
mi: vi.mi,
tail: b,
})
+ if vi.typ == validationTypeMessage ||
+ vi.typ == validationTypeGroup ||
+ vi.typ == validationTypeMap {
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
+ }
b = v
continue State
case validationTypeRepeatedVarint:
@@ -499,6 +513,9 @@ State:
mi: vi.mi,
endGroup: num,
})
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
continue State
case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem:
typeid, v, n, err := messageset.ConsumeFieldValue(b, false)
@@ -521,6 +538,13 @@ State:
mi: xvi.mi,
tail: b[n:],
})
+ if xvi.typ == validationTypeMessage ||
+ xvi.typ == validationTypeGroup ||
+ xvi.typ == validationTypeMap {
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
+ }
b = v
continue State
}
@@ -547,12 +571,14 @@ State:
switch st.typ {
case validationTypeMessage, validationTypeGroup:
numRequiredFields = int(st.mi.numRequiredFields)
+ opts.depth++
case validationTypeMap:
// If this is a map field with a message value that contains
// required fields, require that the value be present.
if st.mi != nil && st.mi.numRequiredFields > 0 {
numRequiredFields = 1
}
+ opts.depth++
}
// If there are more than 64 required fields, this check will
// always fail and we will report that the message is potentially
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 77de0f238..763fd8284 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 10
+ Patch = 11
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 4cbf1aeaf..889d8511d 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -121,9 +121,8 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
out, err = methods.Unmarshal(in)
} else {
- o.RecursionLimit--
- if o.RecursionLimit < 0 {
- return out, errors.New("exceeded max recursion depth")
+ if o.RecursionLimit--; o.RecursionLimit < 0 {
+ return out, errRecursionDepth
}
err = o.unmarshalMessageSlow(b, m)
}
@@ -220,6 +219,9 @@ func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m pro
}
func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) {
+ if o.RecursionLimit--; o.RecursionLimit < 0 {
+ return 0, errRecursionDepth
+ }
if wtyp != protowire.BytesType {
return 0, errUnknown
}
@@ -305,3 +307,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
var errUnknown = errors.New("BUG: internal error (unknown)")
var errDecode = errors.New("cannot parse invalid wire-format data")
+
+var errRecursionDepth = errors.New("exceeded maximum recursion depth")
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 9196288e4..40f17af4e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -108,7 +108,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
if f.L1.Path == "" {
return nil, errors.New("file path must be populated")
}
- if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
+ if f.L1.Syntax == protoreflect.Editions &&
+ (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) &&
+ fd.GetEdition() != descriptorpb.Edition_EDITION_UNSTABLE {
// Allow cmd/protoc-gen-go/testdata to use any edition for easier
// testing of upcoming edition features.
if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
@@ -152,6 +154,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
imp := &f.L2.Imports[i]
imps.importPublic(imp.Imports())
}
+ optionImps := importSet{f.Path(): true}
if len(fd.GetOptionDependency()) > 0 {
optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency()))
for i, path := range fd.GetOptionDependency() {
@@ -165,10 +168,12 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
}
imp.FileDescriptor = f
- if imps[imp.Path()] {
+ if imps[imp.Path()] || optionImps[imp.Path()] {
return nil, errors.New("already imported %q", path)
}
- imps[imp.Path()] = true
+ // This needs to be a separate map so that we don't recognize non-options
+ // symbols coming from option imports.
+ optionImps[imp.Path()] = true
}
f.L2.OptionImports = func() protoreflect.FileImports {
return &optionImports
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 697a61b29..147b8c739 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -46,6 +46,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
return descriptorpb.Edition_EDITION_2023
case filedesc.Edition2024:
return descriptorpb.Edition_EDITION_2024
+ case filedesc.EditionUnstable:
+ return descriptorpb.Edition_EDITION_UNSTABLE
default:
panic(fmt.Sprintf("unknown value for edition: %v", ed))
}
@@ -58,7 +60,7 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
return def
}
edpb := toEditionProto(ed)
- if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
+ if (defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb) && edpb != descriptorpb.Edition_EDITION_UNSTABLE {
// This should never happen protodesc.(FileOptions).New would fail when
// initializing the file descriptor.
// This most likely means the embedded defaults were not updated.
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 4eacb523c..0b23faa95 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -69,6 +69,8 @@ const (
// comparison.
Edition_EDITION_2023 Edition = 1000
Edition_EDITION_2024 Edition = 1001
+ // A placeholder edition for developing and testing unscheduled features.
+ Edition_EDITION_UNSTABLE Edition = 9999
// Placeholder editions for testing feature resolution. These should not be
// used or relied on outside of tests.
Edition_EDITION_1_TEST_ONLY Edition = 1
@@ -91,6 +93,7 @@ var (
999: "EDITION_PROTO3",
1000: "EDITION_2023",
1001: "EDITION_2024",
+ 9999: "EDITION_UNSTABLE",
1: "EDITION_1_TEST_ONLY",
2: "EDITION_2_TEST_ONLY",
99997: "EDITION_99997_TEST_ONLY",
@@ -105,6 +108,7 @@ var (
"EDITION_PROTO3": 999,
"EDITION_2023": 1000,
"EDITION_2024": 1001,
+ "EDITION_UNSTABLE": 9999,
"EDITION_1_TEST_ONLY": 1,
"EDITION_2_TEST_ONLY": 2,
"EDITION_99997_TEST_ONLY": 99997,
@@ -4793,11 +4797,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\x18EnumValueDescriptorProto\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
"\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
- "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" +
+ "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xb5\x01\n" +
"\x16ServiceDescriptorProto\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
"\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
- "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" +
+ "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptionsJ\x04\b\x04\x10\x05R\x06stream\"\x89\x02\n" +
"\x15MethodDescriptorProto\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
"\n" +
@@ -5033,14 +5037,15 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
"\bSemantic\x12\b\n" +
"\x04NONE\x10\x00\x12\a\n" +
"\x03SET\x10\x01\x12\t\n" +
- "\x05ALIAS\x10\x02*\xa7\x02\n" +
+ "\x05ALIAS\x10\x02*\xbe\x02\n" +
"\aEdition\x12\x13\n" +
"\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
"\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
"\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
"\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
"\fEDITION_2023\x10\xe8\a\x12\x11\n" +
- "\fEDITION_2024\x10\xe9\a\x12\x17\n" +
+ "\fEDITION_2024\x10\xe9\a\x12\x15\n" +
+ "\x10EDITION_UNSTABLE\x10\x8fN\x12\x17\n" +
"\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
"\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
"\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 06d584c14..484c21fd5 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -172,13 +172,14 @@ import (
// ) to obtain a formatter capable of generating timestamps in this format.
type Timestamp struct {
state protoimpl.MessageState `protogen:"open.v1"`
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
+ // Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must
+ // be between -315576000000 and 315576000000 inclusive (which corresponds to
+ // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z).
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
+ // Non-negative fractions of a second at nanosecond resolution. This field is
+ // the nanosecond portion of the duration, not an alternative to seconds.
+ // Negative second values with fractions must still have non-negative nanos
+ // values that count forward in time. Must be between 0 and 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
unknownFields protoimpl.UnknownFields
diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
index 5f983b6b6..e07c04e62 100644
--- a/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -596,16 +596,7 @@ func newInformer(clientState Store, options InformerOptions) Controller {
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
- var fifo Queue
- if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
- fifo = NewRealFIFO(MetaNamespaceKeyFunc, clientState, options.Transform)
- } else {
- fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{
- KnownObjects: clientState,
- EmitDeltaTypeReplaced: true,
- Transformer: options.Transform,
- })
- }
+ fifo := newQueueFIFO(clientState, options.Transform)
cfg := &Config{
Queue: fifo,
@@ -623,3 +614,15 @@ func newInformer(clientState Store, options InformerOptions) Controller {
}
return New(cfg)
}
+
+func newQueueFIFO(clientState Store, transform TransformFunc) Queue {
+ if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
+ return NewRealFIFO(MetaNamespaceKeyFunc, clientState, transform)
+ } else {
+ return NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+ KnownObjects: clientState,
+ EmitDeltaTypeReplaced: true,
+ Transformer: transform,
+ })
+ }
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
index 9d9e238cc..a0d7a834a 100644
--- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -270,7 +270,8 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
}
var (
- _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
+ _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
+ _ = TransformingStore(&DeltaFIFO{}) // DeltaFIFO implements TransformingStore to allow memory optimizations
)
var (
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go
index ee9be7727..6fd43375f 100644
--- a/vendor/k8s.io/client-go/tools/cache/reflector.go
+++ b/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -80,7 +80,7 @@ type ReflectorStore interface {
// TransformingStore is an optional interface that can be implemented by the provided store.
// If implemented on the provided store reflector will use the same transformer in its internal stores.
type TransformingStore interface {
- Store
+ ReflectorStore
Transformer() TransformFunc
}
@@ -726,9 +726,11 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) {
return false
}
+ var transformer TransformFunc
storeOpts := []StoreOption{}
if tr, ok := r.store.(TransformingStore); ok && tr.Transformer() != nil {
- storeOpts = append(storeOpts, WithTransformer(tr.Transformer()))
+ transformer = tr.Transformer()
+ storeOpts = append(storeOpts, WithTransformer(transformer))
}
initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name})
@@ -788,7 +790,7 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) {
// we utilize the temporaryStore to ensure independence from the current store implementation.
// as of today, the store is implemented as a queue and will be drained by the higher-level
// component as soon as it finishes replacing the content.
- checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, temporaryStore.List)
+ checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, transformer, temporaryStore.List)
if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
return nil, fmt.Errorf("unable to sync watch-list result: %w", err)
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
index a7e0d9c43..4119c78a6 100644
--- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
+++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
@@ -33,11 +33,11 @@ import (
//
// Note that this function will panic when data inconsistency is detected.
// This is intentional because we want to catch it in the CI.
-func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) {
+func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], listItemTransformFunc func(interface{}) (interface{}, error), retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) {
if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() {
return
}
// for informers we pass an empty ListOptions because
// listFn might be wrapped for filtering during informer construction.
- consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn)
+ consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, listItemTransformFunc, metav1.ListOptions{}, retrieveItemsFn)
}
diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index 99e5fcd18..1c12aa2d6 100644
--- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -539,16 +539,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) {
s.startedLock.Lock()
defer s.startedLock.Unlock()
- var fifo Queue
- if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
- fifo = NewRealFIFO(MetaNamespaceKeyFunc, s.indexer, s.transform)
- } else {
- fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{
- KnownObjects: s.indexer,
- EmitDeltaTypeReplaced: true,
- Transformer: s.transform,
- })
- }
+ fifo := newQueueFIFO(s.indexer, s.transform)
cfg := &Config{
Queue: fifo,
diff --git a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
index ef322bea8..b907410dc 100644
--- a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
@@ -61,7 +61,8 @@ type RealFIFO struct {
}
var (
- _ = Queue(&RealFIFO{}) // RealFIFO is a Queue
+ _ = Queue(&RealFIFO{}) // RealFIFO is a Queue
+ _ = TransformingStore(&RealFIFO{}) // RealFIFO implements TransformingStore to allow memory optimizations
)
// Close the queue.
diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
index 06f172d82..72c0124a0 100644
--- a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
+++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
@@ -45,16 +45,28 @@ func IsDataConsistencyDetectionForWatchListEnabled() bool {
return dataConsistencyDetectionForWatchListEnabled
}
+// SetDataConsistencyDetectionForWatchListEnabledForTest allows to enable/disable data consistency detection for testing purposes.
+// It returns a function that restores the original value.
+func SetDataConsistencyDetectionForWatchListEnabledForTest(enabled bool) func() {
+ original := dataConsistencyDetectionForWatchListEnabled
+ dataConsistencyDetectionForWatchListEnabled = enabled
+ return func() {
+ dataConsistencyDetectionForWatchListEnabled = original
+ }
+}
+
type RetrieveItemsFunc[U any] func() []U
type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error)
+type TransformFunc func(interface{}) (interface{}, error)
+
// CheckDataConsistency exists solely for testing purposes.
// we cannot use checkWatchListDataConsistencyIfRequested because
// it is guarded by an environmental variable.
// we cannot manipulate the environmental variable because
// it will affect other tests in this package.
-func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
+func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listItemTransformFunc TransformFunc, listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) {
klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity)
return
@@ -84,6 +96,15 @@ func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity
if err != nil {
panic(err) // this should never happen
}
+ if listItemTransformFunc != nil {
+ for i := range rawListItems {
+ obj, err := listItemTransformFunc(rawListItems[i])
+ if err != nil {
+ panic(err)
+ }
+ rawListItems[i] = obj.(runtime.Object)
+ }
+ }
listItems := toMetaObjectSliceOrDie(rawListItems)
sort.Sort(byUID(listItems))
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 30615a557..41c5862f1 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -40,7 +40,7 @@ github.com/DataDog/datadog-agent/pkg/version
# github.com/DataDog/datadog-go/v5 v5.6.0
## explicit; go 1.13
github.com/DataDog/datadog-go/v5/statsd
-# github.com/DataDog/dd-trace-go/v2 v2.4.0
+# github.com/DataDog/dd-trace-go/v2 v2.5.0
## explicit; go 1.24.0
github.com/DataDog/dd-trace-go/v2/appsec/events
github.com/DataDog/dd-trace-go/v2/datastreams/options
@@ -84,6 +84,7 @@ github.com/DataDog/dd-trace-go/v2/internal/appsec/listener/waf
github.com/DataDog/dd-trace-go/v2/internal/civisibility/constants
github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils
github.com/DataDog/dd-trace-go/v2/internal/civisibility/utils/telemetry
+github.com/DataDog/dd-trace-go/v2/internal/config
github.com/DataDog/dd-trace-go/v2/internal/datastreams
github.com/DataDog/dd-trace-go/v2/internal/env
github.com/DataDog/dd-trace-go/v2/internal/globalconfig
@@ -117,7 +118,7 @@ github.com/DataDog/dd-trace-go/v2/internal/telemetry/log
github.com/DataDog/dd-trace-go/v2/internal/traceprof
github.com/DataDog/dd-trace-go/v2/internal/urlsanitizer
github.com/DataDog/dd-trace-go/v2/internal/version
-# github.com/DataDog/go-libddwaf/v4 v4.6.1
+# github.com/DataDog/go-libddwaf/v4 v4.8.0
## explicit; go 1.23.0
github.com/DataDog/go-libddwaf/v4
github.com/DataDog/go-libddwaf/v4/internal/bindings
@@ -153,9 +154,6 @@ github.com/DataDog/sketches-go/ddsketch/mapping
github.com/DataDog/sketches-go/ddsketch/pb/sketchpb
github.com/DataDog/sketches-go/ddsketch/stat
github.com/DataDog/sketches-go/ddsketch/store
-# github.com/Masterminds/semver/v3 v3.4.0
-## explicit; go 1.21
-github.com/Masterminds/semver/v3
# github.com/Microsoft/go-winio v0.6.2
## explicit; go 1.21
github.com/Microsoft/go-winio
@@ -189,7 +187,7 @@ github.com/cihub/seelog/archive/zip
## explicit; go 1.13
github.com/coredns/caddy
github.com/coredns/caddy/caddyfile
-# github.com/coredns/coredns v1.13.2
+# github.com/coredns/coredns v1.14.1
## explicit; go 1.24.0
github.com/coredns/coredns/core/dnsserver
github.com/coredns/coredns/coremain
@@ -312,10 +310,6 @@ github.com/go-openapi/swag
# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
## explicit; go 1.13
github.com/go-task/slim-sprig
-# github.com/go-viper/mapstructure/v2 v2.4.0
-## explicit; go 1.18
-github.com/go-viper/mapstructure/v2
-github.com/go-viper/mapstructure/v2/internal/errors
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
github.com/gogo/protobuf/gogoproto
@@ -342,7 +336,7 @@ github.com/google/go-cmp/cmp/internal/value
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2
## explicit; go 1.23.0
github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options
# github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
@@ -386,7 +380,7 @@ github.com/mailru/easyjson/jwriter
# github.com/matttproud/golang_protobuf_extensions v1.0.4
## explicit; go 1.9
github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/miekg/dns v1.1.69
+# github.com/miekg/dns v1.1.70
## explicit; go 1.24.0
github.com/miekg/dns
# github.com/minio/simdjson-go v0.4.5
@@ -504,7 +498,7 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
# github.com/prometheus/client_model v0.6.2
## explicit; go 1.22.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.67.4
+# github.com/prometheus/common v0.67.5
## explicit; go 1.24.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
@@ -519,7 +513,7 @@ github.com/puzpuzpuz/xsync/v3
# github.com/quic-go/qpack v0.6.0
## explicit; go 1.24
github.com/quic-go/qpack
-# github.com/quic-go/quic-go v0.57.0
+# github.com/quic-go/quic-go v0.59.0
## explicit; go 1.24
github.com/quic-go/quic-go
github.com/quic-go/quic-go/http3
@@ -622,32 +616,24 @@ go.opentelemetry.io/auto/sdk/internal/telemetry
# go.opentelemetry.io/collector/component v1.39.0
## explicit; go 1.24
go.opentelemetry.io/collector/component
-# go.opentelemetry.io/collector/featuregate v1.39.0
-## explicit; go 1.24
+# go.opentelemetry.io/collector/featuregate v1.46.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/featuregate
# go.opentelemetry.io/collector/internal/telemetry v0.133.0
## explicit; go 1.24
go.opentelemetry.io/collector/internal/telemetry
go.opentelemetry.io/collector/internal/telemetry/componentattribute
-# go.opentelemetry.io/collector/pdata v1.39.0
-## explicit; go 1.24
+# go.opentelemetry.io/collector/pdata v1.46.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/pdata/internal
-go.opentelemetry.io/collector/pdata/internal/data
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development
-go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1
go.opentelemetry.io/collector/pdata/internal/json
go.opentelemetry.io/collector/pdata/internal/otlp
go.opentelemetry.io/collector/pdata/internal/proto
go.opentelemetry.io/collector/pdata/pcommon
go.opentelemetry.io/collector/pdata/ptrace
+# go.opentelemetry.io/collector/pdata/pprofile v0.140.0
+## explicit; go 1.24.0
+go.opentelemetry.io/collector/pdata/pprofile
# go.opentelemetry.io/contrib/bridges/otelzap v0.12.0
## explicit; go 1.23.0
go.opentelemetry.io/contrib/bridges/otelzap
@@ -724,7 +710,7 @@ go.yaml.in/yaml/v2
# go.yaml.in/yaml/v3 v3.0.4
## explicit; go 1.16
go.yaml.in/yaml/v3
-# golang.org/x/crypto v0.46.0
+# golang.org/x/crypto v0.47.0
## explicit; go 1.24.0
golang.org/x/crypto/chacha20
golang.org/x/crypto/chacha20poly1305
@@ -734,10 +720,10 @@ golang.org/x/crypto/internal/poly1305
# golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
## explicit; go 1.23.0
golang.org/x/exp/constraints
-# golang.org/x/mod v0.30.0
+# golang.org/x/mod v0.31.0
## explicit; go 1.24.0
golang.org/x/mod/semver
-# golang.org/x/net v0.48.0
+# golang.org/x/net v0.49.0
## explicit; go 1.24.0
golang.org/x/net/bpf
golang.org/x/net/context
@@ -754,25 +740,26 @@ golang.org/x/net/internal/socket
golang.org/x/net/internal/timeseries
golang.org/x/net/ipv4
golang.org/x/net/ipv6
+golang.org/x/net/netutil
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.33.0
+# golang.org/x/oauth2 v0.34.0
## explicit; go 1.24.0
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.19.0
## explicit; go 1.24.0
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.39.0
+# golang.org/x/sys v0.40.0
## explicit; go 1.24.0
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.38.0
+# golang.org/x/term v0.39.0
## explicit; go 1.24.0
golang.org/x/term
-# golang.org/x/text v0.32.0
+# golang.org/x/text v0.33.0
## explicit; go 1.24.0
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
@@ -797,7 +784,7 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.14.0
## explicit; go 1.24.0
golang.org/x/time/rate
-# golang.org/x/tools v0.39.0
+# golang.org/x/tools v0.40.0
## explicit; go 1.24.0
golang.org/x/tools/go/ast/edge
golang.org/x/tools/go/ast/inspector
@@ -822,15 +809,15 @@ golang.org/x/tools/internal/versions
## explicit; go 1.18
golang.org/x/xerrors
golang.org/x/xerrors/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8
+# google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217
## explicit; go 1.24.0
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b
## explicit; go 1.24.0
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.77.0
+# google.golang.org/grpc v1.78.0
## explicit; go 1.24.0
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -893,7 +880,7 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.10
+# google.golang.org/protobuf v1.36.11
## explicit; go 1.23
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
@@ -949,7 +936,7 @@ gopkg.in/tomb.v1
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/api v0.34.2
+# k8s.io/api v0.34.3
## explicit; go 1.24.0
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1alpha1
@@ -1068,7 +1055,7 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/client-go v0.34.2
+# k8s.io/client-go v0.34.3
## explicit; go 1.24.0
k8s.io/client-go/applyconfigurations
k8s.io/client-go/applyconfigurations/admissionregistration/v1