diff --git a/config/components/manager/controller_manager_config.yaml b/config/components/manager/controller_manager_config.yaml index 618353c0fe..b99132b119 100644 --- a/config/components/manager/controller_manager_config.yaml +++ b/config/components/manager/controller_manager_config.yaml @@ -53,10 +53,12 @@ integrations: - "kubeflow.org/tfjob" - "kubeflow.org/xgboostjob" - "workload.codeflare.dev/appwrapper" + - "codeflare.dev/appwrapper" # - "pod" # - "deployment" # requires enabling pod integration # - "statefulset" # requires enabling pod integration # - "leaderworkerset.x-k8s.io/leaderworkerset" # requires enabling pod integration +# - "sparkoperator.k8s.io/sparkapplication" # requires enabling pod integration # externalFrameworks: # - "Foo.v1.example.com" # podOptions: diff --git a/config/components/manager/manager.yaml b/config/components/manager/manager.yaml index 7b3fd27791..4648f0c168 100644 --- a/config/components/manager/manager.yaml +++ b/config/components/manager/manager.yaml @@ -24,7 +24,7 @@ spec: - /manager args: - "--zap-log-level=2" - imagePullPolicy: Always + imagePullPolicy: IfNotPresent image: controller:latest name: manager securityContext: diff --git a/config/components/rbac/role.yaml b/config/components/rbac/role.yaml index 15483e27dc..3ff99d95c0 100644 --- a/config/components/rbac/role.yaml +++ b/config/components/rbac/role.yaml @@ -324,6 +324,31 @@ rules: - get - list - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/finalizers + verbs: + - get + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + verbs: + - get + - patch + - update - apiGroups: - workload.codeflare.dev resources: diff --git a/config/components/webhook/manifests.yaml b/config/components/webhook/manifests.yaml index f9fe92e071..d734ce68b5 100644 --- a/config/components/webhook/manifests.yaml +++ b/config/components/webhook/manifests.yaml @@ -253,6 +253,26 @@ webhooks: resources: - rayjobs sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + name: msparkapplication.kb.io + reinvocationPolicy: IfNeeded + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + resources: + - sparkapplications + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -596,6 +616,26 @@ webhooks: resources: - rayjobs sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + name: vsparkapplication.kb.io + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - sparkapplications + sideEffects: None - admissionReviewVersions: - v1 clientConfig: diff --git a/go.mod b/go.mod index 4453697833..68b1049bd7 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module sigs.k8s.io/kueue -go 1.23.0 +go 1.23.1 + +toolchain go1.23.4 require ( github.com/fsnotify/fsnotify v1.8.0 @@ -8,6 +10,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/json-iterator/go v1.1.12 github.com/kubeflow/mpi-operator v0.6.0 + github.com/kubeflow/spark-operator v0.0.0-00010101000000-000000000000 
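+	// NOTE: the all-zero pseudo-version above is a placeholder; it is resolved
+	// by the replace directive below, which currently pins spark-operator to a
+	// fork exposing the api/v1beta2, pkg/common, and pkg/util packages used by
+	// the new integration.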
github.com/kubeflow/training-operator v1.8.1 github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 @@ -40,6 +43,8 @@ require ( sigs.k8s.io/yaml v1.4.0 ) +replace github.com/kubeflow/spark-operator => github.com/everpeace/spark-operator v0.0.0-20250120104544-7f87e6564ab4 + require ( cel.dev/expr v0.18.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -51,7 +56,7 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/chai2010/gettext-go v1.0.3 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -61,7 +66,7 @@ require ( github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-errors/errors v1.4.2 // indirect + github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -69,15 +74,16 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.2 // indirect github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect @@ -108,14 +114,14 @@ require ( go.etcd.io/etcd/api/v3 v3.5.16 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/atomic v1.11.0 // indirect 
go.uber.org/multierr v1.11.0 // indirect @@ -130,10 +136,9 @@ require ( golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.28.0 // indirect - google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.2 // indirect google.golang.org/protobuf v1.36.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index eae333a7d5..f98cb90fcf 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= -github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= +github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -43,6 +43,8 @@ github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lSh github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/everpeace/spark-operator v0.0.0-20250120104544-7f87e6564ab4 h1:dRwvSig1BYOJLIak78SmUfUg7ctZm5eQ5Wb2PcMY0ZA= +github.com/everpeace/spark-operator v0.0.0-20250120104544-7f87e6564ab4/go.mod h1:ly0i0PujdsPFlUsBiLHJkBAqb8wXgJP3y4Kw1DbP12o= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -51,8 +53,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod 
h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -73,14 +75,16 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -93,8 +97,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -234,22 +238,22 @@ go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= go.etcd.io/etcd/server/v3 
v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -311,14 +315,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= 
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= -google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/controller/jobs/jobs.go b/pkg/controller/jobs/jobs.go index bfd87210f3..61ea32c22a 100644 --- a/pkg/controller/jobs/jobs.go +++ b/pkg/controller/jobs/jobs.go @@ -28,5 +28,6 @@ import ( _ "sigs.k8s.io/kueue/pkg/controller/jobs/pod" _ "sigs.k8s.io/kueue/pkg/controller/jobs/raycluster" _ "sigs.k8s.io/kueue/pkg/controller/jobs/rayjob" + _ "sigs.k8s.io/kueue/pkg/controller/jobs/sparkapplication" _ "sigs.k8s.io/kueue/pkg/controller/jobs/statefulset" ) diff --git a/pkg/controller/jobs/pod/pod_webhook.go b/pkg/controller/jobs/pod/pod_webhook.go index 67ed2ee496..19f7f9d7fe 100644 --- a/pkg/controller/jobs/pod/pod_webhook.go +++ b/pkg/controller/jobs/pod/pod_webhook.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -56,6 +57,20 @@ const ( SuspendedByParentAnnotation = "kueue.x-k8s.io/pod-suspending-parent" RoleHashAnnotation = "kueue.x-k8s.io/role-hash" RetriableInGroupAnnotation = "kueue.x-k8s.io/retriable-in-group" + + // pod annotation "kueue.x-k8s.io/skip-pod-integration-webhook=true" + // can forcefully skip the pod integration webhook. + // + // Why such annotation is needed? 
+	// Some controllers mutate the OwnerReference of the pods they manage
+	// through their own webhooks (e.g., the SparkApplication controller).
+	// In that case, even though the CRD integration is enabled in Kueue,
+	// the pod integration webhook might be called WITHOUT the OwnerReference
+	// set. The pod integration webhook then cannot detect that the pod is
+	// controlled by the CRD, and consequently it would mistakenly inject
+	// the pod integration metadata into the pods.
+	SkipWebhookAnnotationKey   = "kueue.x-k8s.io/skip-pod-integration-webhook"
+	SkipWebhookAnnotationValue = "true"
 )

 var (
@@ -139,6 +154,14 @@ func (w *PodWebhook) Default(ctx context.Context, obj runtime.Object) error {
 	log := ctrl.LoggerFrom(ctx).WithName("pod-webhook")
 	log.V(5).Info("Applying defaults")

+	if v, ok := pod.pod.Annotations[SkipWebhookAnnotationKey]; ok && v == SkipWebhookAnnotationValue {
+		log.V(5).Info(fmt.Sprintf(
+			"Skip applying defaults due to %s=%s annotation", SkipWebhookAnnotationKey, SkipWebhookAnnotationValue),
+			"pod", klog.KObj(pod.Object()),
+		)
+		return nil
+	}
+
 	_, suspend := pod.pod.GetAnnotations()[SuspendedByParentAnnotation]
 	if !suspend {
 		// Namespace filtering
diff --git a/pkg/controller/jobs/sparkapplication/sparkapplication_controller.go b/pkg/controller/jobs/sparkapplication/sparkapplication_controller.go
new file mode 100644
index 0000000000..9b24bd1df8
--- /dev/null
+++ b/pkg/controller/jobs/sparkapplication/sparkapplication_controller.go
@@ -0,0 +1,461 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package sparkapplication + +import ( + "context" + "errors" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" + "sigs.k8s.io/kueue/pkg/controller/jobframework" + "sigs.k8s.io/kueue/pkg/podset" + + kfsparkapi "github.com/kubeflow/spark-operator/api/v1beta2" + kfsparkcommon "github.com/kubeflow/spark-operator/pkg/common" + kfsparkutil "github.com/kubeflow/spark-operator/pkg/util" +) + +var ( + gvk = kfsparkapi.SchemeGroupVersion.WithKind("SparkApplication") + + FrameworkName = "sparkoperator.k8s.io/sparkapplication" +) + +func init() { + utilruntime.Must(jobframework.RegisterIntegration(FrameworkName, jobframework.IntegrationCallbacks{ + SetupIndexes: SetupIndexes, + NewJob: NewJob, + NewReconciler: NewReconciler, + SetupWebhook: SetupSparkApplicationWebhook, + JobType: &kfsparkapi.SparkApplication{}, + AddToScheme: kfsparkapi.AddToScheme, + IsManagingObjectsOwner: isSparkApplication, + DependencyList: []string{"pod"}, + })) +} + +// +kubebuilder:rbac:groups="",resources=events,verbs=create;watch;update;patch +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications/finalizers,verbs=get;update +// +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads/finalizers,verbs=update + +func NewJob() jobframework.GenericJob { + return &SparkApplication{} +} + +var NewReconciler = jobframework.NewGenericReconcilerFactory(NewJob) + +type SparkApplication kfsparkapi.SparkApplication + +type integrationMode string + +const ( + integrationModeAggregated integrationMode = "aggregated" + integrationModeExecutorDetached integrationMode = "executor-detached" + integrationModeUnknown integrationMode = "unknown" +) + +var _ jobframework.GenericJob = (*SparkApplication)(nil) + +func fromObject(obj runtime.Object) *SparkApplication { + return (*SparkApplication)(obj.(*kfsparkapi.SparkApplication)) +} + +func (s *SparkApplication) Finished() (string, bool, bool) { + failed := s.Status.AppState.State == kfsparkapi.ApplicationStateFailed + success := s.Status.AppState.State == kfsparkapi.ApplicationStateCompleted + finished := success || failed + if finished { + if failed { + return s.Status.AppState.ErrorMessage, false, true + } + return "", true, true + } + return "", false, false +} + +func (s *SparkApplication) GVK() schema.GroupVersionKind { + return gvk +} + +func (s *SparkApplication) IsActive() bool { + active := false + if s.Status.DriverInfo.PodName != "" { + active = true + } + mode, _ := s.integrationMode() + if mode == integrationModeAggregated { + numNonTerminatedPods := 0 + for _, state := range s.Status.ExecutorState { + if !kfsparkutil.IsExecutorTerminated(state) { + numNonTerminatedPods++ + } + } + active = (numNonTerminatedPods > 0) || active + } + return active +} + 
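+// How spec.dynamicAllocation selects the integration mode (an illustrative
+// summary for reviewers; integrationMode below is the authoritative logic):
+//
+//	dynamicAllocation: {enabled: false} -> aggregated: driver and executor
+//	                                       pods are admitted as one Workload
+//	dynamicAllocation: {enabled: true}  -> executor-detached: only the driver
+//	                                       belongs to this Workload; executor
+//	                                       pods go through the pod integration
+//	dynamicAllocation: null             -> unknown: reported as a validation
+//	                                       error (the field must be set)
+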
+func (s *SparkApplication) IsSuspended() bool {
+	return s.Spec.Suspend
+}
+
+func (s *SparkApplication) Object() client.Object {
+	return (*kfsparkapi.SparkApplication)(s)
+}
+
+func (s *SparkApplication) PodSets() ([]kueue.PodSet, error) {
+	driverPodSet, err := s.driverPodSet()
+	if err != nil {
+		return nil, err
+	}
+
+	mode, integrationModeErr := s.integrationMode()
+	if integrationModeErr != nil {
+		return nil, integrationModeErr
+	}
+
+	switch mode {
+	case integrationModeAggregated:
+		executorPodSet, err := s.executorPodSet()
+		if err != nil {
+			return nil, err
+		}
+		return []kueue.PodSet{*driverPodSet, *executorPodSet}, nil
+	case integrationModeExecutorDetached:
+		return []kueue.PodSet{*driverPodSet}, nil
+	default:
+		return nil, errors.New("unknown integration mode: this should never happen")
+	}
+}
+
+func (s *SparkApplication) PodsReady() bool {
+	driverReady := kfsparkutil.IsDriverRunning(s.asSparkApp())
+	if !driverReady {
+		return false
+	}
+
+	switch mode, _ := s.integrationMode(); mode {
+	case integrationModeAggregated:
+		for _, executorState := range s.Status.ExecutorState {
+			if executorState != kfsparkapi.ExecutorStateRunning {
+				return false
+			}
+		}
+		return true
+	case integrationModeExecutorDetached:
+		return true
+	default:
+		return false
+	}
+}
+
+func (s *SparkApplication) RestorePodSetsInfo(podSetsInfo []podset.PodSetInfo) bool {
+	if len(podSetsInfo) == 0 {
+		return false
+	}
+	changed := false
+	for _, info := range podSetsInfo {
+		if info.Name == kfsparkcommon.SparkRoleDriver {
+			ps, err := s.driverPodSet()
+			if err != nil {
+				continue
+			}
+			changed = podset.RestorePodSpec(&ps.Template.ObjectMeta, &ps.Template.Spec, info) || changed
+			continue
+		}
+
+		mode, _ := s.integrationMode()
+		if info.Name == kfsparkcommon.SparkRoleExecutor && mode == integrationModeAggregated {
+			ps, err := s.executorPodSet()
+			if err != nil {
+				continue
+			}
+			changed = podset.RestorePodSpec(&ps.Template.ObjectMeta, &ps.Template.Spec, info) || changed
+			continue
+		}
+	}
+	return changed
+}
+
+func (s *SparkApplication) RunWithPodSetsInfo(podSetsInfo []podset.PodSetInfo) error {
+	s.Spec.Suspend = false
+	for _, info := range podSetsInfo {
+		if info.Name == kfsparkcommon.SparkRoleDriver {
+			ps, err := s.driverPodSet()
+			if err != nil {
+				return err
+			}
+			if err := podset.Merge(&ps.Template.ObjectMeta, &ps.Template.Spec, info); err != nil {
+				return err
+			}
+			continue
+		}
+
+		mode, _ := s.integrationMode()
+		if info.Name == kfsparkcommon.SparkRoleExecutor && mode == integrationModeAggregated {
+			ps, err := s.executorPodSet()
+			if err != nil {
+				return err
+			}
+			if err := podset.Merge(&ps.Template.ObjectMeta, &ps.Template.Spec, info); err != nil {
+				return err
+			}
+			continue
+		}
+	}
+	return nil
+}
+
+func (s *SparkApplication) Suspend() {
+	s.Spec.Suspend = true
+}
+
+func SetupIndexes(ctx context.Context, indexer client.FieldIndexer) error {
+	return jobframework.SetupWorkloadOwnerIndex(ctx, indexer, gvk)
+}
+
+func isSparkApplication(owner *metav1.OwnerReference) bool {
+	return owner.Kind == "SparkApplication" && strings.HasPrefix(owner.APIVersion, kfsparkapi.GroupVersion.Group)
+}
+
+func (s *SparkApplication) asSparkApp() *kfsparkapi.SparkApplication {
+	return (*kfsparkapi.SparkApplication)(s)
+}
+
+func (s *SparkApplication) integrationMode() (integrationMode, *field.Error) {
+	if s.Spec.DynamicAllocation == nil {
+		return integrationModeUnknown, field.Required(
+			field.NewPath("spec", "dynamicAllocation"),
+			"a Kueue-managed SparkApplication must set spec.dynamicAllocation explicitly (must not be nil)",
+		)
+	}
+	if s.Spec.DynamicAllocation.Enabled {
+		return integrationModeExecutorDetached, nil
+	} else {
+		return integrationModeAggregated, nil
+	}
+}
+
+func (s *SparkApplication) driverPodSet() (*kueue.PodSet, error) {
+	podTemplate, containerIndex, err := sparkPodSpecToPodSetTemplate(s.Spec.Driver.SparkPodSpec, field.NewPath("spec", "driver"), kfsparkcommon.SparkDriverContainerName)
+	if err != nil {
+		return nil, err
+	}
+	applySparkSpecFieldsToPodTemplate(s.Spec, containerIndex, podTemplate)
+
+	// Driver spec
+	// TODO: apply the remaining fields of the spec.
+	// This is not a problem for now because PodSets do not care about those fields.
+	if s.Spec.Driver.CoreRequest != nil {
+		if q, err := resource.ParseQuantity(*s.Spec.Driver.CoreRequest); err != nil {
+			return nil, fmt.Errorf("spec.driver.coreRequest=%s can't parse: %w", *s.Spec.Driver.CoreRequest, err)
+		} else {
+			if podTemplate.Spec.Containers[containerIndex].Resources.Requests == nil {
+				podTemplate.Spec.Containers[containerIndex].Resources.Requests = corev1.ResourceList{}
+			}
+			podTemplate.Spec.Containers[containerIndex].Resources.Requests[corev1.ResourceCPU] = q
+		}
+	}
+	if s.Spec.Driver.PriorityClassName != nil {
+		podTemplate.Spec.PriorityClassName = *s.Spec.Driver.PriorityClassName
+	}
+
+	return &kueue.PodSet{
+		Name:            kfsparkcommon.SparkRoleDriver,
+		Count:           int32(1),
+		Template:        *podTemplate,
+		TopologyRequest: jobframework.PodSetTopologyRequest(&podTemplate.ObjectMeta, nil, nil, nil),
+	}, nil
+}
+
+func (s *SparkApplication) executorPodSet() (*kueue.PodSet, error) {
+	podTemplate, containerIndex, err := sparkPodSpecToPodSetTemplate(s.Spec.Executor.SparkPodSpec, field.NewPath("spec", "executor"), kfsparkcommon.SparkExecutorContainerName)
+	if err != nil {
+		return nil, err
+	}
+
+	applySparkSpecFieldsToPodTemplate(s.Spec, containerIndex, podTemplate)
+
+	// Executor spec
+	// TODO: apply the remaining fields of the spec.
+	// This is not a problem for now because PodSets do not care about those fields.
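+	// Like the driver path above, coreRequest is a plain string in the
+	// SparkApplication API, so it must be parsed into a resource.Quantity
+	// (for example "500m" or "2"); a malformed value is rejected with an error.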
+	if s.Spec.Executor.CoreRequest != nil {
+		if q, err := resource.ParseQuantity(*s.Spec.Executor.CoreRequest); err != nil {
+			return nil, fmt.Errorf("spec.executor.coreRequest=%s can't parse: %w", *s.Spec.Executor.CoreRequest, err)
+		} else {
+			if podTemplate.Spec.Containers[containerIndex].Resources.Requests == nil {
+				podTemplate.Spec.Containers[containerIndex].Resources.Requests = corev1.ResourceList{}
+			}
+			podTemplate.Spec.Containers[containerIndex].Resources.Requests[corev1.ResourceCPU] = q
+		}
+	}
+	if s.Spec.Executor.PriorityClassName != nil {
+		podTemplate.Spec.PriorityClassName = *s.Spec.Executor.PriorityClassName
+	}
+	return &kueue.PodSet{
+		Name:            kfsparkcommon.SparkRoleExecutor,
+		Template:        *podTemplate,
+		Count:           s.executorCount(),
+		TopologyRequest: jobframework.PodSetTopologyRequest(&podTemplate.ObjectMeta, ptr.To(kfsparkcommon.LabelSparkExecutorID), nil, nil),
+	}, nil
+}
+
+func sparkPodSpecToPodSetTemplate(sparkPodSpec kfsparkapi.SparkPodSpec, field *field.Path, targetContainerName string) (*corev1.PodTemplateSpec, int, error) {
+	spec, index, err := sparkPodSpecToPodSpec(sparkPodSpec, field, targetContainerName)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	return &corev1.PodTemplateSpec{
+		ObjectMeta: sparkPodSpecToMetadata(sparkPodSpec),
+		Spec:       *spec,
+	}, index, nil
+}
+
+func sparkPodSpecToMetadata(sparkPodSpec kfsparkapi.SparkPodSpec) metav1.ObjectMeta {
+	podTemplate := &corev1.PodTemplateSpec{}
+
+	if sparkPodSpec.Template != nil {
+		podTemplate = sparkPodSpec.Template
+	}
+	for k, v := range sparkPodSpec.Labels {
+		if podTemplate.Labels == nil {
+			podTemplate.Labels = map[string]string{}
+		}
+		podTemplate.Labels[k] = v
+	}
+
+	for k, v := range sparkPodSpec.Annotations {
+		if podTemplate.Annotations == nil {
+			podTemplate.Annotations = map[string]string{}
+		}
+		podTemplate.Annotations[k] = v
+	}
+
+	return podTemplate.ObjectMeta
+}
+
+func sparkPodSpecToPodSpec(sparkPodSpec kfsparkapi.SparkPodSpec, field *field.Path, targetContainerName string) (*corev1.PodSpec, int, error) {
+	podTemplate := &corev1.PodTemplateSpec{}
+
+	if sparkPodSpec.Template != nil {
+		podTemplate = sparkPodSpec.Template
+	}
+
+	containerIndex := findContainer(&podTemplate.Spec, targetContainerName)
+	if containerIndex == -1 {
+		podTemplate.Spec.Containers = append(podTemplate.Spec.Containers, corev1.Container{
+			Name: targetContainerName,
+		})
+		containerIndex = len(podTemplate.Spec.Containers) - 1
+	}
+
+	if sparkPodSpec.CoreLimit != nil {
+		if q, err := resource.ParseQuantity(*sparkPodSpec.CoreLimit); err != nil {
+			return nil, -1, fmt.Errorf("%s=%s can't parse: %w", field.Child("coreLimit"), *sparkPodSpec.CoreLimit, err)
+		} else {
+			if podTemplate.Spec.Containers[containerIndex].Resources.Limits == nil {
+				podTemplate.Spec.Containers[containerIndex].Resources.Limits = corev1.ResourceList{}
+			}
+			podTemplate.Spec.Containers[containerIndex].Resources.Limits[corev1.ResourceCPU] = q
+		}
+	}
+
+	if sparkPodSpec.Memory != nil {
+		if q, err := resource.ParseQuantity(*sparkPodSpec.Memory); err != nil {
+			return nil, -1, fmt.Errorf("%s=%s can't parse: %w", field.Child("memory"), *sparkPodSpec.Memory, err)
+		} else {
+			if podTemplate.Spec.Containers[containerIndex].Resources.Requests == nil {
+				podTemplate.Spec.Containers[containerIndex].Resources.Requests = corev1.ResourceList{}
+			}
+			podTemplate.Spec.Containers[containerIndex].Resources.Requests[corev1.ResourceMemory] = q
+		}
+	}
+
+	if sparkPodSpec.GPU != nil {
+		qStr := fmt.Sprintf("%d", sparkPodSpec.GPU.Quantity)
+		if q, err := resource.ParseQuantity(qStr); err != nil {
+			return nil, -1, fmt.Errorf("%s=%s can't parse: %w", field.Child("gpu", "quantity"), qStr, err)
+		} else {
+			if podTemplate.Spec.Containers[containerIndex].Resources.Limits == nil {
+				podTemplate.Spec.Containers[containerIndex].Resources.Limits = corev1.ResourceList{}
+			}
+			podTemplate.Spec.Containers[containerIndex].Resources.Limits[corev1.ResourceName(sparkPodSpec.GPU.Name)] = q
+		}
+	}
+
+	if sparkPodSpec.Image != nil {
+		podTemplate.Spec.Containers[containerIndex].Image = *sparkPodSpec.Image
+	}
+
+	if sparkPodSpec.Affinity != nil {
+		podTemplate.Spec.Affinity = sparkPodSpec.Affinity
+	}
+
+	if sparkPodSpec.Tolerations != nil {
+		podTemplate.Spec.Tolerations = sparkPodSpec.Tolerations
+	}
+
+	if len(sparkPodSpec.Sidecars) > 0 {
+		podTemplate.Spec.Containers = append(podTemplate.Spec.Containers, sparkPodSpec.Sidecars...)
+	}
+
+	if len(sparkPodSpec.InitContainers) > 0 {
+		podTemplate.Spec.InitContainers = append(podTemplate.Spec.InitContainers, sparkPodSpec.InitContainers...)
+	}
+
+	if sparkPodSpec.NodeSelector != nil {
+		podTemplate.Spec.NodeSelector = sparkPodSpec.NodeSelector
+	}
+
+	// TODO: apply the remaining fields of SparkPodSpec.
+	// This is not a problem for now because PodSets do not care about those fields.
+
+	return &podTemplate.Spec, containerIndex, nil
+}
+
+func applySparkSpecFieldsToPodTemplate(spec kfsparkapi.SparkApplicationSpec, targetContainerIndex int, podTemplate *corev1.PodTemplateSpec) {
+	if spec.Image != nil && podTemplate.Spec.Containers[targetContainerIndex].Image == "" {
+		podTemplate.Spec.Containers[targetContainerIndex].Image = *spec.Image
+	}
+	if spec.NodeSelector != nil && podTemplate.Spec.NodeSelector == nil {
+		podTemplate.Spec.NodeSelector = spec.NodeSelector
+	}
+
+	// TODO: apply the remaining fields of SparkApplicationSpec.
+	// This is not a problem for now because PodSets do not care about those fields.
+}
+
+func (s *SparkApplication) executorCount() int32 {
+	return ptr.Deref(s.Spec.Executor.Instances, 1)
+}
+
+func findContainer(podSpec *corev1.PodSpec, containerName string) int {
+	for i, c := range podSpec.Containers {
+		if c.Name == containerName {
+			return i
+		}
+	}
+	return -1
+}
diff --git a/pkg/controller/jobs/sparkapplication/sparkapplication_controller_test.go b/pkg/controller/jobs/sparkapplication/sparkapplication_controller_test.go
new file mode 100644
index 0000000000..6684d2755d
--- /dev/null
+++ b/pkg/controller/jobs/sparkapplication/sparkapplication_controller_test.go
@@ -0,0 +1,608 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package sparkapplication + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1" + kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" + "sigs.k8s.io/kueue/pkg/constants" + "sigs.k8s.io/kueue/pkg/controller/jobframework" + utiltesting "sigs.k8s.io/kueue/pkg/util/testing" + podtesting "sigs.k8s.io/kueue/pkg/util/testingjobs/pod" + sparkapptesting "sigs.k8s.io/kueue/pkg/util/testingjobs/sparkapplication" + + kfsparkapi "github.com/kubeflow/spark-operator/api/v1beta2" + kfsparkcommon "github.com/kubeflow/spark-operator/pkg/common" +) + +var ( + baseAppWrapper = sparkapptesting. + MakeSparkApplication("job", "default"). + Queue("queue") + + expectedDriverPodWrapper = &podtesting.PodWrapper{Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "spec-driver-labels": "spec-driver-labels", + }, + Annotations: map[string]string{ + "spec-driver-annotations": "spec-driver-annotations", + }, + }, + Spec: corev1.PodSpec{ + PriorityClassName: *baseAppWrapper.Spec.Driver.PriorityClassName, + Containers: []corev1.Container{{ + Name: kfsparkcommon.SparkDriverContainerName, + Image: *baseAppWrapper.Spec.Image, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(*baseAppWrapper.Spec.Driver.CoreRequest), + corev1.ResourceMemory: resource.MustParse(*baseAppWrapper.Spec.Driver.Memory), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(*baseAppWrapper.Spec.Driver.CoreLimit), + }, + }, + }}, + Volumes: baseAppWrapper.Spec.Volumes, + }, + }} + expectedExecutorPodWrapper = &podtesting.PodWrapper{Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "spec-executor-template-labels": "spec-executor-template-labels", + "spec-executor-labels": "spec-executor-labels", + }, + Annotations: map[string]string{ + "spec-executor-annotations": "spec-executor-annotations", + }, + }, + Spec: corev1.PodSpec{ + Affinity: baseAppWrapper.Spec.Executor.Affinity, + NodeSelector: baseAppWrapper.Spec.Executor.NodeSelector, + Tolerations: baseAppWrapper.Spec.Executor.Tolerations, + InitContainers: baseAppWrapper.Spec.Executor.InitContainers, + PriorityClassName: *baseAppWrapper.Spec.Executor.PriorityClassName, + Containers: append([]corev1.Container{{ + Name: kfsparkcommon.SparkExecutorContainerName, + Image: *baseAppWrapper.Spec.Executor.Image, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(*baseAppWrapper.Spec.Executor.CoreRequest), + corev1.ResourceMemory: resource.MustParse(*baseAppWrapper.Spec.Executor.Memory), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(*baseAppWrapper.Spec.Executor.CoreLimit), + corev1.ResourceName(baseAppWrapper.Spec.Executor.GPU.Name): resource.MustParse(fmt.Sprintf("%d", baseAppWrapper.Spec.Executor.GPU.Quantity)), + }, + }, + }}, baseAppWrapper.Spec.Executor.Sidecars...), + }, + }} +) + +func TestPodSets(t *testing.T) { + testCases := map[string]struct { + sparkApp *kfsparkapi.SparkApplication + wantPodSets []kueue.PodSet + wantErr error + }{ + "normal case, dynamicAllocation=false": { + sparkApp: 
baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + Obj(), + wantPodSets: []kueue.PodSet{{ + Name: kfsparkcommon.SparkRoleDriver, + Count: 1, + Template: toPodTemplateSpec(expectedDriverPodWrapper), + }, { + Name: kfsparkcommon.SparkRoleExecutor, + Count: 1, + Template: toPodTemplateSpec(expectedExecutorPodWrapper.Clone()), + }}, + }, + "normal case, dynamicAllocation=true": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(true). + DynamicAllocationMaxExecutor(1). + Obj(), + wantPodSets: []kueue.PodSet{{ + Name: kfsparkcommon.SparkRoleDriver, + Count: 1, + Template: toPodTemplateSpec(expectedDriverPodWrapper), + }}, + }, + + "with required topology annotation, dynamicAllocation=false": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + DriverAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"). + ExecutorAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"). + Obj(), + wantPodSets: []kueue.PodSet{{ + Name: kfsparkcommon.SparkRoleDriver, + Count: 1, + Template: toPodTemplateSpec( + expectedDriverPodWrapper.Clone().Annotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"), + ), + TopologyRequest: &kueue.PodSetTopologyRequest{ + Required: ptr.To("cloud.com/block"), + }, + }, { + Name: kfsparkcommon.SparkRoleExecutor, + Count: 1, + Template: toPodTemplateSpec( + expectedExecutorPodWrapper.Clone().Annotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"), + ), + TopologyRequest: &kueue.PodSetTopologyRequest{ + Required: ptr.To("cloud.com/block"), + PodIndexLabel: ptr.To(kfsparkcommon.LabelSparkExecutorID), + }, + }}, + }, + "with required topology annotation, dynamicAllocation=true": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(true). + DynamicAllocationMaxExecutor(1). + DriverAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"). + ExecutorAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"). + Obj(), + wantPodSets: []kueue.PodSet{{ + Name: kfsparkcommon.SparkRoleDriver, + Count: 1, + Template: toPodTemplateSpec( + expectedDriverPodWrapper.Clone().Annotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"), + ), + TopologyRequest: &kueue.PodSetTopologyRequest{ + Required: ptr.To("cloud.com/block"), + }, + }}, + }, + + "with preferred topology annotation, dynamicAllocation=false": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + DriverAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block"). + ExecutorAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block"). + Obj(), + wantPodSets: []kueue.PodSet{{ + Name: kfsparkcommon.SparkRoleDriver, + Count: 1, + Template: toPodTemplateSpec( + expectedDriverPodWrapper.Clone().Annotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block"), + ), + TopologyRequest: &kueue.PodSetTopologyRequest{ + Preferred: ptr.To("cloud.com/block"), + }, + }, { + Name: kfsparkcommon.SparkRoleExecutor, + Count: 1, + Template: toPodTemplateSpec( + expectedExecutorPodWrapper.Clone().Annotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block"), + ), + TopologyRequest: &kueue.PodSetTopologyRequest{ + Preferred: ptr.To("cloud.com/block"), + PodIndexLabel: ptr.To(kfsparkcommon.LabelSparkExecutorID), + }, + }}, + }, + "with preferred topology annotation, dynamicAllocation=true": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(true). 
+				DynamicAllocationMaxExecutor(1).
+				DriverAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block").
+				ExecutorAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block").
+				Obj(),
+			wantPodSets: []kueue.PodSet{{
+				Name:  kfsparkcommon.SparkRoleDriver,
+				Count: 1,
+				Template: toPodTemplateSpec(
+					expectedDriverPodWrapper.Clone().Annotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block"),
+				),
+				TopologyRequest: &kueue.PodSetTopologyRequest{
+					Preferred: ptr.To("cloud.com/block"),
+				},
+			}},
+		},
+
+		"invalid - can't parse resource": {
+			sparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(false).
+				ExecutorInstances(1).
+				CoreLimit(kfsparkcommon.SparkRoleDriver, ptr.To("malformat")).
+				Obj(),
+			wantErr: fmt.Errorf(`spec.driver.coreLimit=malformat can't parse: %w`, resource.ErrFormatWrong),
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			gotPodSets, gotErr := fromObject(tc.sparkApp).PodSets()
+			if gotErr != nil {
+				if tc.wantErr == nil {
+					t.Fatalf("PodSets returned unexpected error: %v", gotErr)
+				}
+				if diff := cmp.Diff(tc.wantErr.Error(), gotErr.Error()); diff != "" {
+					t.Errorf("PodSets returned error (-want,+got):\n%s", diff)
+				}
+				return
+			}
+			if tc.wantErr != nil {
+				t.Fatalf("PodSets unexpectedly succeeded, want error: %v", tc.wantErr)
+			}
+			if diff := cmp.Diff(tc.wantPodSets, gotPodSets); diff != "" {
+				t.Errorf("pod sets mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
+
+var (
+	jobCmpOpts = []cmp.Option{
+		cmpopts.EquateEmpty(),
+		cmpopts.IgnoreFields(kfsparkapi.SparkApplication{}, "TypeMeta", "ObjectMeta"),
+	}
+	workloadCmpOpts = []cmp.Option{
+		cmpopts.EquateEmpty(),
+		cmpopts.IgnoreFields(kueue.Workload{}, "TypeMeta"),
+		cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name", "Labels", "ResourceVersion", "OwnerReferences", "Finalizers"),
+		cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"),
+		cmpopts.IgnoreFields(kueue.PodSet{}, "Template"),
+	}
+)
+
+func TestReconciler(t *testing.T) {
+	baseWPCWrapper := utiltesting.MakeWorkloadPriorityClass("test-wpc").
+		PriorityValue(100)
+	driverPCWrapper := utiltesting.MakePriorityClass("driver-priority-class").
+		PriorityValue(200)
+	executorPCWrapper := utiltesting.MakePriorityClass("executor-priority-class").PriorityValue(100)
+
+	testNamespace := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "ns",
+			Labels: map[string]string{
+				"kubernetes.io/metadata.name": "ns",
+			},
+		},
+	}
+
+	testCases := map[string]struct {
+		reconcilerOptions []jobframework.Option
+		sparkApp          *kfsparkapi.SparkApplication
+		priorityClasses   []client.Object
+		wantSparkApp      *kfsparkapi.SparkApplication
+		wantWorkloads     []kueue.Workload
+		wantErr           error
+	}{
+		// dynamicAllocation=false => integrationModeAggregated
+		// - the SparkApplication Workload manages both driver and executor pods
+		"dynamicAllocation=false, workload is created with queue and priorityClass": {
+			sparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(false).
+				ExecutorInstances(1).
+				Obj(),
+			wantSparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(false).
+				ExecutorInstances(1).
+				Suspend(true).
+				Obj(),
+			wantWorkloads: []kueue.Workload{
+				*utiltesting.MakeWorkload("job", "default").
+					Queue("queue").
+					PriorityClass(driverPCWrapper.Name).
+					Priority(driverPCWrapper.Value).
+					PriorityClassSource(constants.PodPriorityClassSource).
+ PodSets( + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1).Obj(), + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleExecutor, int(1)).Obj(), + ).Obj(), + }, + priorityClasses: []client.Object{ + driverPCWrapper.Obj(), executorPCWrapper.Obj(), + }, + }, + "dynamicAllocation=false, workload is created with queue, priorityClass and workloadPriorityClass": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + WorkloadPriorityClass(baseWPCWrapper.Name). + Obj(), + wantSparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + WorkloadPriorityClass(baseWPCWrapper.Name). + Suspend(true). + Obj(), + wantWorkloads: []kueue.Workload{ + *utiltesting.MakeWorkload("job", "default"). + Queue("queue"). + PriorityClass(baseWPCWrapper.Name). + Priority(baseWPCWrapper.Value). + PriorityClassSource(constants.WorkloadPriorityClassSource). + PodSets( + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1).Obj(), + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleExecutor, 1).Obj(), + ).Obj(), + }, + priorityClasses: []client.Object{ + baseWPCWrapper.Obj(), driverPCWrapper.Obj(), executorPCWrapper.Obj(), + }, + }, + "dynamicAllocation=false, workload is created with queue, priorityClass and required topology request": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + DriverAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"). + ExecutorAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block"). + Obj(), + wantSparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + DriverAnnotation( + kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block", + ). + ExecutorAnnotation( + kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block", + ). + Suspend(true). + Obj(), + wantWorkloads: []kueue.Workload{ + *utiltesting.MakeWorkload("job", "default"). + Queue("queue"). + PriorityClass(driverPCWrapper.Name). + Priority(driverPCWrapper.Value). + PriorityClassSource(constants.PodPriorityClassSource). + PodSets( + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1). + RequiredTopologyRequest("cloud.com/block"). + Obj(), + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleExecutor, 1). + RequiredTopologyRequest("cloud.com/block"). + PodIndexLabel(ptr.To(kfsparkcommon.LabelSparkExecutorID)). + Obj(), + ).Obj(), + }, + priorityClasses: []client.Object{ + driverPCWrapper.Obj(), executorPCWrapper.Obj(), + }, + }, + "dynamicAllocation=false, workload is created with queue, priorityClass and preferred topology request": { + sparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + DriverAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block"). + ExecutorAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block"). + Obj(), + wantSparkApp: baseAppWrapper.Clone(). + DynamicAllocation(false). + ExecutorInstances(1). + DriverAnnotation( + kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block", + ). + ExecutorAnnotation( + kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block", + ). + Suspend(true). + Obj(), + wantWorkloads: []kueue.Workload{ + *utiltesting.MakeWorkload("job", "default"). + Queue("queue"). + PriorityClass(driverPCWrapper.Name). + Priority(driverPCWrapper.Value). + PriorityClassSource(constants.PodPriorityClassSource). + PodSets( + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1). + PreferredTopologyRequest("cloud.com/block"). 
+						Obj(),
+					*utiltesting.MakePodSet(kfsparkcommon.SparkRoleExecutor, 1).
+						PreferredTopologyRequest("cloud.com/block").
+						PodIndexLabel(ptr.To(kfsparkcommon.LabelSparkExecutorID)).
+						Obj(),
+				).Obj(),
+			},
+			priorityClasses: []client.Object{
+				driverPCWrapper.Obj(), executorPCWrapper.Obj(),
+			},
+		},
+
+		// dynamicAllocation=true => integrationModeExecutorDetached
+		// - only the driver is managed by the SparkApplication Workload
+		// - executor pods are managed by the pod integration (one Workload per executor pod)
+		"dynamicAllocation=true, workload is created with queue and priorityClass": {
+			sparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(true).
+				DynamicAllocationMaxExecutor(1).
+				Obj(),
+			wantSparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(true).
+				DynamicAllocationMaxExecutor(1).
+				Suspend(true).
+				Obj(),
+			wantWorkloads: []kueue.Workload{
+				*utiltesting.MakeWorkload("job", "default").
+					Queue("queue").
+					PriorityClass(driverPCWrapper.Name).
+					Priority(driverPCWrapper.Value).
+					PriorityClassSource(constants.PodPriorityClassSource).
+					PodSets(
+						*utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1).Obj(),
+					).Obj(),
+			},
+			priorityClasses: []client.Object{
+				driverPCWrapper.Obj(), executorPCWrapper.Obj(),
+			},
+		},
+		"dynamicAllocation=true, workload is created with queue, priorityClass and workloadPriorityClass": {
+			sparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(true).
+				DynamicAllocationMaxExecutor(1).
+				WorkloadPriorityClass(baseWPCWrapper.Name).
+				Obj(),
+			wantSparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(true).
+				DynamicAllocationMaxExecutor(1).
+				WorkloadPriorityClass(baseWPCWrapper.Name).
+				Suspend(true).
+				Obj(),
+			wantWorkloads: []kueue.Workload{
+				*utiltesting.MakeWorkload("job", "default").
+					Queue("queue").
+					PriorityClass(baseWPCWrapper.Name).
+					Priority(baseWPCWrapper.Value).
+					PriorityClassSource(constants.WorkloadPriorityClassSource).
+					PodSets(
+						*utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1).Obj(),
+					).Obj(),
+			},
+			priorityClasses: []client.Object{
+				baseWPCWrapper.Obj(), driverPCWrapper.Obj(), executorPCWrapper.Obj(),
+			},
+		},
+		"dynamicAllocation=true, workload is created with queue, priorityClass and required topology request": {
+			sparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(true).
+				DynamicAllocationMaxExecutor(1).
+				DriverAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block").
+				ExecutorAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block").
+				Obj(),
+			wantSparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(true).
+				DynamicAllocationMaxExecutor(1).
+				DriverAnnotation(
+					kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block",
+				).
+				ExecutorAnnotation(
+					kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.com/block",
+				).
+				Suspend(true).
+				Obj(),
+			wantWorkloads: []kueue.Workload{
+				*utiltesting.MakeWorkload("job", "default").
+					Queue("queue").
+					PriorityClass(driverPCWrapper.Name).
+					Priority(driverPCWrapper.Value).
+					PriorityClassSource(constants.PodPriorityClassSource).
+					PodSets(
+						*utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1).
+							RequiredTopologyRequest("cloud.com/block").
+							Obj(),
+					).Obj(),
+			},
+			priorityClasses: []client.Object{
+				driverPCWrapper.Obj(), executorPCWrapper.Obj(),
+			},
+		},
+		"dynamicAllocation=true, workload is created with queue, priorityClass and preferred topology request": {
+			sparkApp: baseAppWrapper.Clone().
+				DynamicAllocation(true).
+				DynamicAllocationMaxExecutor(1).
+ DriverAnnotation( + kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block", + ). + ExecutorAnnotation( + kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block", + ). + Obj(), + wantSparkApp: baseAppWrapper.Clone(). + DynamicAllocation(true). + DynamicAllocationMaxExecutor(1). + DriverAnnotation( + kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block", + ). + ExecutorAnnotation( + kueuealpha.PodSetPreferredTopologyAnnotation, "cloud.com/block", + ). + Suspend(true). + Obj(), + wantWorkloads: []kueue.Workload{ + *utiltesting.MakeWorkload("job", "default"). + Queue("queue"). + PriorityClass(driverPCWrapper.Name). + Priority(driverPCWrapper.Value). + PriorityClassSource(constants.PodPriorityClassSource). + PodSets( + *utiltesting.MakePodSet(kfsparkcommon.SparkRoleDriver, 1). + PreferredTopologyRequest("cloud.com/block"). + Obj(), + ).Obj(), + }, + priorityClasses: []client.Object{ + driverPCWrapper.Obj(), executorPCWrapper.Obj(), + }, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + ctx, _ := utiltesting.ContextWithLog(t) + clientBuilder := utiltesting.NewClientBuilder(kfsparkapi.AddToScheme) + if err := SetupIndexes(ctx, utiltesting.AsIndexer(clientBuilder)); err != nil { + t.Fatalf("Could not setup indexes: %v", err) + } + + objs := append(tc.priorityClasses, tc.sparkApp, testNamespace) + kClient := clientBuilder.WithObjects(objs...).Build() + recorder := record.NewBroadcaster().NewRecorder(kClient.Scheme(), corev1.EventSource{Component: "test"}) + + reconciler := NewReconciler(kClient, recorder, tc.reconcilerOptions...) + + jobKey := client.ObjectKeyFromObject(tc.sparkApp) + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: jobKey, + }) + if diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("Reconcile returned error (-want,+got):\n%s", diff) + } + + var gotSparkApp kfsparkapi.SparkApplication + if err := kClient.Get(ctx, jobKey, &gotSparkApp); err != nil { + t.Fatalf("Could not get Job after reconcile: %v", err) + } + if diff := cmp.Diff(tc.wantSparkApp, &gotSparkApp, jobCmpOpts...); diff != "" { + t.Errorf("SparkApplication after reconcile (-want,+got):\n%s", diff) + } + var gotWorkloads kueue.WorkloadList + if err := kClient.List(ctx, &gotWorkloads); err != nil { + t.Fatalf("Could not get Workloads after reconcile: %v", err) + } + if diff := cmp.Diff(tc.wantWorkloads, gotWorkloads.Items, workloadCmpOpts...); diff != "" { + t.Errorf("Workloads after reconcile (-want,+got):\n%s", diff) + } + }) + } +} + +func toPodTemplateSpec(pw *podtesting.PodWrapper) corev1.PodTemplateSpec { + return corev1.PodTemplateSpec{ + ObjectMeta: pw.ObjectMeta, + Spec: pw.Spec, + } +} diff --git a/pkg/controller/jobs/sparkapplication/sparkapplication_webhook.go b/pkg/controller/jobs/sparkapplication/sparkapplication_webhook.go new file mode 100644 index 0000000000..f582155205 --- /dev/null +++ b/pkg/controller/jobs/sparkapplication/sparkapplication_webhook.go @@ -0,0 +1,341 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sparkapplication
+
+import (
+	"context"
+	"fmt"
+
+	apivalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"sigs.k8s.io/kueue/pkg/controller/constants"
+	"sigs.k8s.io/kueue/pkg/controller/jobframework"
+	"sigs.k8s.io/kueue/pkg/controller/jobframework/webhook"
+	podcontroller "sigs.k8s.io/kueue/pkg/controller/jobs/pod"
+	"sigs.k8s.io/kueue/pkg/queue"
+
+	kfsparkapi "github.com/kubeflow/spark-operator/api/v1beta2"
+)
+
+type SparkApplicationWebhook struct {
+	client                       client.Client
+	manageJobsWithoutQueueName   bool
+	managedJobsNamespaceSelector labels.Selector
+	queues                       *queue.Manager
+}
+
+// SetupSparkApplicationWebhook configures the webhook for SparkApplication.
+func SetupSparkApplicationWebhook(mgr ctrl.Manager, opts ...jobframework.Option) error {
+	options := jobframework.ProcessOptions(opts...)
+	wh := &SparkApplicationWebhook{
+		client:                       mgr.GetClient(),
+		manageJobsWithoutQueueName:   options.ManageJobsWithoutQueueName,
+		managedJobsNamespaceSelector: options.ManagedJobsNamespaceSelector,
+		queues:                       options.Queues,
+	}
+	obj := &kfsparkapi.SparkApplication{}
+	return webhook.WebhookManagedBy(mgr).
+		For(obj).
+		WithMutationHandler(webhook.WithLosslessDefaulter(mgr.GetScheme(), obj, wh)).
+		WithValidator(wh).
+		Complete()
+}
+
+// +kubebuilder:webhook:path=/mutate-sparkoperator-k8s-io-v1beta2-sparkapplication,mutating=true,failurePolicy=fail,reinvocationPolicy=IfNeeded,sideEffects=None,groups=sparkoperator.k8s.io,resources=sparkapplications,verbs=create,versions=v1beta2,name=msparkapplication.kb.io,admissionReviewVersions=v1
+
+var _ admission.CustomDefaulter = &SparkApplicationWebhook{}
+
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the type
+func (wh *SparkApplicationWebhook) Default(ctx context.Context, obj runtime.Object) error {
+	sparkApp := fromObject(obj)
+	log := ctrl.LoggerFrom(ctx).WithName("sparkapplication-webhook").WithValues("sparkApplication", klog.KObj(sparkApp.Object()))
+
+	jobframework.ApplyDefaultLocalQueue(sparkApp.Object(), wh.queues.DefaultLocalQueueExist)
+	suspend, err := jobframework.WorkloadShouldBeSuspended(
+		ctx, sparkApp.Object(), wh.client, wh.manageJobsWithoutQueueName, wh.managedJobsNamespaceSelector,
+	)
+	if err != nil {
+		return err
+	}
+
+	if !suspend {
+		return nil
+	}
+
+	log.V(5).Info("Applying defaults")
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	sparkApp.Suspend()
+	wh.setDefaultsToDriverSpec(ctx, sparkApp)
+	wh.setDefaultsToExecutorSpec(ctx, sparkApp)
+
+	return nil
+}
+
+// setDefaultsToDriverSpec marks the driver pod so that the pod-integration
+// webhook skips it; the driver is always accounted for by the
+// SparkApplication Workload itself.
+func (wh *SparkApplicationWebhook) setDefaultsToDriverSpec(ctx context.Context, sparkApp *SparkApplication) {
+	log := ctrl.LoggerFrom(ctx)
+
+	defaultAnnotations := map[string]string{
+		podcontroller.SkipWebhookAnnotationKey: podcontroller.SkipWebhookAnnotationValue,
+	}
+
+	log.V(5).Info("Setting defaults to spec.driver.annotations", "defaults", defaultAnnotations)
+	if len(defaultAnnotations) > 0 && sparkApp.Spec.Driver.Annotations == nil {
+		sparkApp.Spec.Driver.Annotations = make(map[string]string, len(defaultAnnotations))
+	}
+
+	for k, v := range defaultAnnotations {
+		sparkApp.Spec.Driver.Annotations[k] = v
+	}
+}
+
+// setDefaultsToExecutorSpec injects the labels and annotations that the
+// integration mode requires on executor pods: in Aggregated mode executors
+// skip the pod-integration webhook, while in ExecutorDetached mode they are
+// handed over to the pod integration as children of this SparkApplication.
+func (wh *SparkApplicationWebhook) setDefaultsToExecutorSpec(ctx context.Context, sparkApp *SparkApplication) {
+	log := ctrl.LoggerFrom(ctx)
+
+	setDefaults := func(
+		defaultLabels map[string]string,
+		defaultAnnotations map[string]string,
+	) {
+		if len(defaultLabels) > 0 {
+			log.V(5).Info("Setting defaults to spec.executor.labels", "defaults", defaultLabels)
+			if sparkApp.Spec.Executor.Labels == nil {
+				sparkApp.Spec.Executor.Labels = make(map[string]string, len(defaultLabels))
+			}
+			for k, v := range defaultLabels {
+				sparkApp.Spec.Executor.Labels[k] = v
+			}
+		}
+		if len(defaultAnnotations) > 0 {
+			log.V(5).Info("Setting defaults to spec.executor.annotations", "defaults", defaultAnnotations)
+			if sparkApp.Spec.Executor.Annotations == nil {
+				sparkApp.Spec.Executor.Annotations = make(map[string]string, len(defaultAnnotations))
+			}
+			for k, v := range defaultAnnotations {
+				sparkApp.Spec.Executor.Annotations[k] = v
+			}
+		}
+	}
+
+	switch mode, _ := sparkApp.integrationMode(); mode {
+	case integrationModeAggregated:
+		setDefaults(
+			nil,
+			map[string]string{
+				podcontroller.SkipWebhookAnnotationKey: podcontroller.SkipWebhookAnnotationValue,
+			},
+		)
+
+	case integrationModeExecutorDetached:
+		setDefaults(
+			map[string]string{
+				constants.QueueLabel: jobframework.QueueNameForObject(sparkApp.Object()),
+			},
+			map[string]string{
+				podcontroller.SuspendedByParentAnnotation: FrameworkName,
+			},
+		)
+	}
+}
+
+// +kubebuilder:webhook:path=/validate-sparkoperator-k8s-io-v1beta2-sparkapplication,mutating=false,failurePolicy=fail,sideEffects=None,groups=sparkoperator.k8s.io,resources=sparkapplications,verbs=create;update,versions=v1beta2,name=vsparkapplication.kb.io,admissionReviewVersions=v1
+
+var _ admission.CustomValidator = &SparkApplicationWebhook{}
+
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type
+func (wh *SparkApplicationWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
+	sparkApp := fromObject(obj)
+	log := ctrl.LoggerFrom(ctx).
+		WithName("sparkapplication-webhook").
+		WithValues("sparkApplication", klog.KObj(sparkApp.Object()))
+	ctx = ctrl.LoggerInto(ctx, log)
+	log.Info("Validating create")
+
+	suspend, err := jobframework.WorkloadShouldBeSuspended(
+		ctx, sparkApp.Object(), wh.client, wh.manageJobsWithoutQueueName, wh.managedJobsNamespaceSelector,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	if !suspend {
+		return nil, nil
+	}
+
+	return nil, wh.validateCreate(ctx, sparkApp).ToAggregate()
+}
+
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type
+func (wh *SparkApplicationWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+	oldSparkApp := fromObject(oldObj)
+	newSparkApp := fromObject(newObj)
+	log := ctrl.LoggerFrom(ctx).WithName("sparkapplication-webhook").
+		WithValues("sparkApplication",
+			klog.KObj(newSparkApp.Object()),
+		)
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	newSuspend, err := jobframework.WorkloadShouldBeSuspended(
+		ctx, newSparkApp.Object(), wh.client, wh.manageJobsWithoutQueueName, wh.managedJobsNamespaceSelector,
+	)
+	if err != nil {
+		return nil, err
+	}
+	if !newSuspend {
+		return nil, nil
+	}
+
+	log.Info("Validating update")
+	var allErrs field.ErrorList
+	allErrs = append(allErrs, wh.validateUpdate(ctx, oldSparkApp, newSparkApp)...)
+	// the create-time invariants are re-checked on update so that the
+	// annotations and labels injected by the mutating webhook cannot be
+	// removed after admission
+	allErrs = append(allErrs, wh.validateCreate(ctx, newSparkApp)...)
+	return nil, allErrs.ToAggregate()
+}
+
+// validateUpdate checks the immutability rules that only apply to updates.
+func (wh *SparkApplicationWebhook) validateUpdate(
+	_ context.Context, oldSparkApp, newSparkApp *SparkApplication,
+) field.ErrorList {
+	allErrs := jobframework.ValidateJobOnUpdate(oldSparkApp, newSparkApp)
+
+	// an unknown integration mode is reported by validateCreate()
+	newMode, _ := newSparkApp.integrationMode()
+
+	// not suspended and integrationMode is Aggregated
+	// --> spec.executor.instances is locked
+	if !oldSparkApp.IsSuspended() && !newSparkApp.IsSuspended() && newMode == integrationModeAggregated {
+		allErrs = append(allErrs, apivalidation.ValidateImmutableField(
+			newSparkApp.Spec.Executor.Instances,
+			oldSparkApp.Spec.Executor.Instances,
+			field.NewPath("spec", "executor", "instances"),
+		)...)
+		// NOTE: an alternative would be to lock the whole spec except
+		// Spec.Suspend in Aggregated mode, by comparing both objects with
+		// suspend forced on:
+		// newSparkApp.Suspend()
+		// oldSparkApp.Suspend()
+		// allErrs = append(allErrs, apivalidation.ValidateImmutableField(
+		// 	newSparkApp.Spec, oldSparkApp.Spec,
+		// 	field.NewPath("spec"),
+		// )...)
+	}
+
+	return allErrs
+}
+
+// validateCreate checks that the webhook-managed annotations and labels are
+// consistent with the integration mode derived from spec.dynamicAllocation.
+func (wh *SparkApplicationWebhook) validateCreate(_ context.Context, sparkApp *SparkApplication) field.ErrorList {
+	allErrs := jobframework.ValidateJobOnCreate(sparkApp)
+
+	allErrs = append(allErrs,
+		expectStringValue(
+			sparkApp.Spec.Driver.Annotations[podcontroller.SkipWebhookAnnotationKey],
+			podcontroller.SkipWebhookAnnotationValue,
+			field.NewPath("spec", "driver", "annotations").Key(podcontroller.SkipWebhookAnnotationKey),
+		)...,
+	)
+	allErrs = append(allErrs,
+		expectKeyNotExists(
+			sparkApp.Spec.Driver.Annotations,
+			podcontroller.SuspendedByParentAnnotation,
+			field.NewPath("spec", "driver", "annotations").Key(podcontroller.SuspendedByParentAnnotation),
+		)...,
+	)
+	mode, err := sparkApp.integrationMode()
+	if err != nil {
+		allErrs = append(allErrs, err)
+	}
+	switch mode {
+	case integrationModeAggregated:
+		allErrs = append(allErrs,
+			expectStringValue(
+				sparkApp.Spec.Executor.Annotations[podcontroller.SkipWebhookAnnotationKey],
+				podcontroller.SkipWebhookAnnotationValue,
+				field.NewPath("spec", "executor", "annotations").Key(podcontroller.SkipWebhookAnnotationKey),
+			)...,
+		)
+		allErrs = append(allErrs,
+			expectKeyNotExists(
+				sparkApp.Spec.Executor.Annotations,
+				podcontroller.SuspendedByParentAnnotation,
+				field.NewPath("spec", "executor", "annotations").Key(podcontroller.SuspendedByParentAnnotation),
+			)...,
+		)
+		if sparkApp.Spec.Executor.Instances == nil {
+			allErrs = append(allErrs,
+				field.Required(
+					field.NewPath("spec", "executor", "instances"),
+					"must not be nil",
+				),
+			)
+		}
+	case integrationModeExecutorDetached:
+		allErrs = append(allErrs,
+			expectKeyNotExists(
+				sparkApp.Spec.Executor.Annotations,
+				podcontroller.SkipWebhookAnnotationKey,
+				field.NewPath("spec", "executor", "annotations").Key(podcontroller.SkipWebhookAnnotationKey),
+			)...,
+		)
+		allErrs = append(allErrs,
+			expectStringValue(
+				sparkApp.Spec.Executor.Annotations[podcontroller.SuspendedByParentAnnotation],
+				FrameworkName,
+				field.NewPath("spec", "executor", "annotations").Key(podcontroller.SuspendedByParentAnnotation),
+			)...,
+		)
+		if sparkApp.Spec.DynamicAllocation.MaxExecutors == nil {
+			allErrs = append(allErrs,
+				field.Required(
+					field.NewPath("spec", "dynamicAllocation", "maxExecutors"),
+					"must not be nil",
+				),
+			)
+		}
+	}
+
+	return allErrs
+}
+
+// expectStringValue returns an error when actual differs from expected.
+func expectStringValue(actual, expected string, path *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	if actual != expected {
+
allErrs = append(allErrs, field.Invalid(path, actual, fmt.Sprintf("must be %s", expected))) + } + return allErrs +} + +func expectKeyNotExists(targetMap map[string]string, key string, path *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if v, ok := targetMap[key]; ok { + allErrs = append(allErrs, + field.Invalid(path, v, "must not exist"), + ) + } + + return allErrs +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type +func (wh *SparkApplicationWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} diff --git a/pkg/controller/jobs/sparkapplication/sparkapplication_webhook_test.go b/pkg/controller/jobs/sparkapplication/sparkapplication_webhook_test.go new file mode 100644 index 0000000000..56f3260fc5 --- /dev/null +++ b/pkg/controller/jobs/sparkapplication/sparkapplication_webhook_test.go @@ -0,0 +1,373 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + + apivalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" + + "sigs.k8s.io/kueue/pkg/cache" + "sigs.k8s.io/kueue/pkg/controller/constants" + podcontroller "sigs.k8s.io/kueue/pkg/controller/jobs/pod" + "sigs.k8s.io/kueue/pkg/features" + "sigs.k8s.io/kueue/pkg/queue" + utiltesting "sigs.k8s.io/kueue/pkg/util/testing" + sparkapptesting "sigs.k8s.io/kueue/pkg/util/testingjobs/sparkapplication" + + kfsparkapi "github.com/kubeflow/spark-operator/api/v1beta2" +) + +func TestDefaults(t *testing.T) { + baseApp := sparkapptesting.MakeSparkApplication("job", "default") + expectedBaseApp := sparkapptesting.MakeSparkApplication("job", "default"). 
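+	// every case that ends up managed by Kueue expects the driver to carry the
+	// pod-webhook skip annotation and the application to be suspended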
+ DriverAnnotation( + podcontroller.SkipWebhookAnnotationKey, + podcontroller.SkipWebhookAnnotationValue, + ).Suspend(true) + + testCases := map[string]struct { + sparkApp *kfsparkapi.SparkApplication + manageJobsWithoutQueueName bool + localQueueDefaulting bool + defaultLqExist bool + want *kfsparkapi.SparkApplication + }{ + "unmanaged": { + sparkApp: baseApp.Clone().Obj(), + want: baseApp.Clone().Obj(), + }, + "unmanaged: LocalQueueDefaulting=true, DefaultLocalQueue not exist": { + sparkApp: baseApp.Clone().Obj(), + want: baseApp.Clone().Obj(), + localQueueDefaulting: true, + }, + "managed: LocalQueueDefaulting=true, DefaultLocalQueue exists": { + sparkApp: baseApp.Clone().Obj(), + want: expectedBaseApp.Clone().Queue("default").Obj(), + localQueueDefaulting: true, + defaultLqExist: true, + }, + "managed: manageJobsWithoutQueueName=true, LocalQueueDefaulting=false, default LocalQueue not exist": { + sparkApp: baseApp.Clone().Obj(), + want: expectedBaseApp.Clone().Obj(), + manageJobsWithoutQueueName: true, + }, + "managed: manageJobsWithoutQueueName=true, LocalQueueDefaulting=true, default LocalQueue not exist": { + sparkApp: baseApp.Clone().Obj(), + want: expectedBaseApp.Clone().Obj(), + manageJobsWithoutQueueName: true, + localQueueDefaulting: true, + }, + "managed: manageJobsWithoutQueueName=true, LocalQueueDefaulting=true, default LocalQueue exists": { + sparkApp: baseApp.Clone().Obj(), + want: expectedBaseApp.Clone().Queue("default").Obj(), + manageJobsWithoutQueueName: true, + localQueueDefaulting: true, + defaultLqExist: true, + }, + "managed: with queue, dynamicAllocation=nil(integrationMode=Unknown)": { + sparkApp: baseApp.Clone().Queue("queue").Obj(), + want: expectedBaseApp.Clone().Queue("queue").Obj(), + }, + "managed: with queue, dynamicAllocation.Enabled=true(integrationMode=ExecutorDetached)": { + sparkApp: baseApp.Clone().Queue("queue").DynamicAllocation(true).Obj(), + want: expectedBaseApp.Clone().Queue("queue").DynamicAllocation(true). + ExecutorLabel( + constants.QueueLabel, "queue", + ). + ExecutorAnnotation( + podcontroller.SuspendedByParentAnnotation, FrameworkName, + ).Obj(), + }, + "managed: with queue, dynamicAllocation.Enabled=false(integrationMode=Aggregated)": { + sparkApp: baseApp.Clone().Queue("queue").DynamicAllocation(false).Obj(), + want: expectedBaseApp.Clone().Queue("queue").DynamicAllocation(false). + ExecutorAnnotation( + podcontroller.SkipWebhookAnnotationKey, + podcontroller.SkipWebhookAnnotationValue, + ).Obj(), + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + features.SetFeatureGateDuringTest(t, features.ManagedJobsNamespaceSelector, false) + features.SetFeatureGateDuringTest(t, features.LocalQueueDefaulting, tc.localQueueDefaulting) + ctx, _ := utiltesting.ContextWithLog(t) + + builder := utiltesting.NewClientBuilder() + cli := builder.Build() + cqCache := cache.New(cli) + queueManager := queue.NewManager(cli, cqCache) + if tc.defaultLqExist { + if err := queueManager.AddLocalQueue(ctx, utiltesting.MakeLocalQueue("default", "default"). 
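+					// a LocalQueue named "default" is what LocalQueueDefaulting
+					// resolves when a workload has no queue-name label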
+ ClusterQueue("cluster-queue").Obj()); err != nil { + t.Fatalf("failed to create default local queue: %s", err) + } + } + + wh := &SparkApplicationWebhook{ + client: cli, + manageJobsWithoutQueueName: tc.manageJobsWithoutQueueName, + queues: queueManager, + } + if err := wh.Default(ctx, tc.sparkApp); err != nil { + t.Errorf("failed to set defaults for sparkoperator.k8s.io/v1beta2/sparkapplication: %s", err) + } + if diff := cmp.Diff(tc.want, tc.sparkApp); len(diff) != 0 { + t.Errorf("Default() mismatch (-want,+got):\n%s", diff) + } + }) + } +} + +func TestValidateCreate(t *testing.T) { + baseApp := sparkapptesting.MakeSparkApplication("job", "default") + validAppAggregated := baseApp.Clone(). + Queue("queue"). + DynamicAllocation(false). + Suspend(true). + ExecutorInstances(1). + DriverAnnotation( + podcontroller.SkipWebhookAnnotationKey, podcontroller.SkipWebhookAnnotationValue, + ). + ExecutorAnnotation( + podcontroller.SkipWebhookAnnotationKey, podcontroller.SkipWebhookAnnotationValue, + ) + validAppDetached := baseApp.Clone(). + Queue("queue"). + DynamicAllocation(true). + DynamicAllocationMaxExecutor(1). + Suspend(true). + DriverAnnotation( + podcontroller.SkipWebhookAnnotationKey, podcontroller.SkipWebhookAnnotationValue, + ). + ExecutorLabel( + constants.QueueLabel, "queue", + ). + ExecutorAnnotation( + podcontroller.SuspendedByParentAnnotation, FrameworkName, + ) + + testCases := map[string]struct { + sparkApp *kfsparkapi.SparkApplication + manageJobsWithoutQueueName bool + wantErr error + }{ + "valid: unmanaged": { + sparkApp: baseApp.Clone().Obj(), + }, + "invalid: managed, dynamicAllocation=nil": { + sparkApp: baseApp.Clone().Queue("queue").Obj(), + wantErr: field.ErrorList{ + field.Invalid(field.NewPath("spec", "driver", "annotations").Key(podcontroller.SkipWebhookAnnotationKey), "", "must be true"), + field.Required(field.NewPath("spec", "dynamicAllocation"), "kueue managed sparkapplication must set spec.dynamicAllocation explicitly (must not be nil)"), + }.ToAggregate(), + }, + + "valid: managed, dynamicAllocation=true(integrationMode=ExecutorDetached)": { + sparkApp: validAppDetached.Clone().Obj(), + }, + "invalid: managed, dynamicAllocation=true(integrationMode=ExecutorDetached)": { + sparkApp: baseApp.Clone().Queue("queue").DynamicAllocation(true). + DriverAnnotation( + podcontroller.SuspendedByParentAnnotation, "", + ). + ExecutorAnnotation( + podcontroller.SkipWebhookAnnotationKey, "", + ). + Obj(), + wantErr: field.ErrorList{ + field.Invalid( + field.NewPath("spec", "driver", "annotations").Key(podcontroller.SkipWebhookAnnotationKey), "", "must be true", + ), + field.Invalid( + field.NewPath("spec", "driver", "annotations").Key(podcontroller.SuspendedByParentAnnotation), "", "must not exist", + ), + field.Invalid( + field.NewPath("spec", "executor", "annotations").Key(podcontroller.SkipWebhookAnnotationKey), "", "must not exist", + ), + field.Invalid( + field.NewPath("spec", "executor", "annotations").Key(podcontroller.SuspendedByParentAnnotation), + "", "must be sparkoperator.k8s.io/sparkapplication", + ), + field.Required( + field.NewPath("spec", "dynamicAllocation", "maxExecutors"), "must not be nil", + ), + }.ToAggregate(), + }, + + "valid: managed, dynamicAllocation=false(integrationMode=Aggregated)": { + sparkApp: validAppAggregated.Clone().Obj(), + }, + "invalid: managed, dynamicAllocation=false(integrationMode=Aggregated)": { + sparkApp: baseApp.Clone().Queue("queue").DynamicAllocation(false). 
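+			// Aggregated mode requires the skip-webhook annotation on both pod
+			// sets and forbids the parent-suspend annotation; this case violates
+			// both rules and also leaves executor.instances unset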
+ DriverAnnotation( + podcontroller.SuspendedByParentAnnotation, "", + ). + ExecutorAnnotation( + podcontroller.SuspendedByParentAnnotation, "", + ). + Obj(), + wantErr: field.ErrorList{ + field.Invalid( + field.NewPath("spec", "driver", "annotations").Key(podcontroller.SkipWebhookAnnotationKey), "", "must be true", + ), + field.Invalid( + field.NewPath("spec", "driver", "annotations").Key(podcontroller.SuspendedByParentAnnotation), "", "must not exist", + ), + field.Invalid( + field.NewPath("spec", "executor", "annotations").Key(podcontroller.SkipWebhookAnnotationKey), "", "must be true", + ), + field.Invalid( + field.NewPath("spec", "executor", "annotations").Key(podcontroller.SuspendedByParentAnnotation), "", "must not exist", + ), + field.Required( + field.NewPath("spec", "executor", "instances"), "must not be nil", + ), + }.ToAggregate(), + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + wh := &SparkApplicationWebhook{} + _, result := wh.ValidateCreate(context.Background(), tc.sparkApp) + if diff := cmp.Diff(tc.wantErr, result); diff != "" { + t.Errorf("ValidateCreate() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestValidateUpdate(t *testing.T) { + baseApp := sparkapptesting.MakeSparkApplication("job", "default"). + DynamicAllocation(false). + ExecutorInstances(1). + DriverAnnotation( + podcontroller.SkipWebhookAnnotationKey, podcontroller.SkipWebhookAnnotationValue, + ). + ExecutorAnnotation( + podcontroller.SkipWebhookAnnotationKey, podcontroller.SkipWebhookAnnotationValue, + ) + + testCases := map[string]struct { + oldSparkApp *kfsparkapi.SparkApplication + newSparkApp *kfsparkapi.SparkApplication + wantErr error + }{ + "valid: unmanaged": { + oldSparkApp: baseApp.Clone(). + Obj(), + newSparkApp: baseApp.Clone(). + Suspend(true). + Obj(), + wantErr: nil, + }, + "valid: managed, suspend=true, nothing changed": { + oldSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue"). + Obj(), + newSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue"). + Obj(), + wantErr: nil, + }, + "valid: managed, suspend=true, update unrelated metadata": { + oldSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue"). + Obj(), + newSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue"). + Label("key", "value"). + Obj(), + wantErr: nil, + }, + "valid: managed, suspend=true, update Queue": { + oldSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue"). + Obj(), + newSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue2"). + Obj(), + wantErr: nil, + }, + "invalid: managed, suspend=true, update WorkloadPriorityClass": { + oldSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue"). + WorkloadPriorityClass("wp1"). + Obj(), + newSparkApp: baseApp.Clone(). + Suspend(true). + Queue("queue"). + WorkloadPriorityClass("wp2"). + Obj(), + wantErr: field.ErrorList{ + field.Invalid(field.NewPath("metadata", "labels").Key(constants.WorkloadPriorityClassLabel), "wp2", apivalidation.FieldImmutableErrorMsg), + }.ToAggregate(), + }, + "invalid: managed, suspended=false, update Queue": { + oldSparkApp: baseApp.Clone(). + Suspend(false). + Queue("queue"). + Obj(), + newSparkApp: baseApp.Clone(). + Suspend(false). + Queue("queue2"). + Obj(), + wantErr: field.ErrorList{ + field.Invalid(field.NewPath("metadata", "labels").Key(constants.QueueLabel), "queue2", apivalidation.FieldImmutableErrorMsg), + }.ToAggregate(), + }, + "invalid: managed, suspended=false, dynamicAllocation=false, update executor.instances": { + oldSparkApp: baseApp.Clone(). 
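+			// while unsuspended in Aggregated mode, spec.executor.instances is
+			// immutable, so bumping it from 1 to 2 must be rejected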
+ Suspend(false). + Queue("queue"). + ExecutorInstances(1). + Obj(), + newSparkApp: baseApp.Clone(). + Suspend(false). + Queue("queue"). + ExecutorInstances(2). + Obj(), + wantErr: field.ErrorList{ + field.Invalid(field.NewPath("spec", "executor", "instances"), ptr.To(int32(2)), apivalidation.FieldImmutableErrorMsg), + }.ToAggregate(), + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + wh := &SparkApplicationWebhook{} + _, result := wh.ValidateUpdate(context.Background(), tc.oldSparkApp, tc.newSparkApp) + if diff := cmp.Diff(tc.wantErr, result); diff != "" { + t.Errorf("ValidateUpdate() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/util/testingjobs/sparkapplication/wrappers.go b/pkg/util/testingjobs/sparkapplication/wrappers.go new file mode 100644 index 0000000000..4e4c36bb19 --- /dev/null +++ b/pkg/util/testingjobs/sparkapplication/wrappers.go @@ -0,0 +1,229 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "sigs.k8s.io/kueue/pkg/controller/constants" + + kfsparkapi "github.com/kubeflow/spark-operator/api/v1beta2" + kfsparkcommon "github.com/kubeflow/spark-operator/pkg/common" +) + +// SparkApplicationWrapper wraps a SparkApplication. 
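+// Builder methods mutate the wrapped object in place and return the wrapper
+// so calls can be chained; Clone deep-copies the wrapper before branching a
+// shared fixture. For example:
+//
+//	app := MakeSparkApplication("job", "default").
+//		Queue("queue").
+//		DynamicAllocation(true).
+//		DynamicAllocationMaxExecutor(1).
+//		Obj()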
+type SparkApplicationWrapper struct{ kfsparkapi.SparkApplication }
+
+// MakeSparkApplication creates a wrapper for a SparkApplication with
+// pre-populated driver and executor specs; the application starts unsuspended
+// and with no executor instance count set.
+func MakeSparkApplication(name, ns string) *SparkApplicationWrapper {
+	return &SparkApplicationWrapper{kfsparkapi.SparkApplication{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        name,
+			Namespace:   ns,
+			Annotations: make(map[string]string, 1),
+		},
+		Spec: kfsparkapi.SparkApplicationSpec{
+			Suspend:      false,
+			Image:        ptr.To("spark:3.5.3"),
+			Mode:         kfsparkapi.DeployModeCluster,
+			Type:         kfsparkapi.SparkApplicationTypeScala,
+			MainClass:    ptr.To("local:///opt/spark/examples/jars/spark-examples.jar"),
+			SparkVersion: "3.5.3",
+			Driver: kfsparkapi.DriverSpec{
+				CoreRequest:       ptr.To("1"),
+				PriorityClassName: ptr.To("driver-priority-class"),
+				SparkPodSpec: kfsparkapi.SparkPodSpec{
+					CoreLimit:      ptr.To("1"),
+					Memory:         ptr.To("256Mi"),
+					SchedulerName:  ptr.To("test"),
+					ServiceAccount: ptr.To("test"),
+					Labels: map[string]string{
+						"spec-driver-labels": "spec-driver-labels",
+					},
+					Annotations: map[string]string{
+						"spec-driver-annotations": "spec-driver-annotations",
+					},
+				},
+			},
+			Executor: kfsparkapi.ExecutorSpec{
+				Instances:         nil,
+				CoreRequest:       ptr.To("1"),
+				PriorityClassName: ptr.To("executor-priority-class"),
+				SparkPodSpec: kfsparkapi.SparkPodSpec{
+					Template: &corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{
+								"spec-executor-template-labels": "spec-executor-template-labels",
+							},
+						},
+					},
+					CoreLimit: ptr.To("1"),
+					Memory:    ptr.To("256Mi"),
+					GPU: &kfsparkapi.GPUSpec{
+						Name:     "nvidia.com/gpu",
+						Quantity: 1,
+					},
+					Image: ptr.To("executor-image"),
+					Labels: map[string]string{
+						"spec-executor-labels": "spec-executor-labels",
+					},
+					Annotations: map[string]string{
+						"spec-executor-annotations": "spec-executor-annotations",
+					},
+					Affinity: &corev1.Affinity{
+						NodeAffinity: &corev1.NodeAffinity{
+							PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
+								Weight: 1,
+								Preference: corev1.NodeSelectorTerm{
+									MatchFields: []corev1.NodeSelectorRequirement{{
+										Key:      "test",
+										Operator: corev1.NodeSelectorOpExists,
+									}},
+								},
+							}},
+						},
+					},
+					Tolerations: []corev1.Toleration{{
+						Key:      "test",
+						Operator: corev1.TolerationOpExists,
+						Effect:   corev1.TaintEffectNoSchedule,
+					}},
+					SchedulerName:  ptr.To("test"),
+					Sidecars:       []corev1.Container{{Name: "sidecar", Image: "test"}},
+					InitContainers: []corev1.Container{{Name: "initContainer", Image: "test"}},
+					NodeSelector: map[string]string{
+						"spec-driver-node-selector": "spec-driver-node-selector",
+					},
+					ServiceAccount: ptr.To("test"),
+				},
+			},
+		},
+	}}
+}
+
+// Obj returns the inner SparkApplication.
+func (w *SparkApplicationWrapper) Obj() *kfsparkapi.SparkApplication {
+	return &w.SparkApplication
+}
+
+// Suspend updates the suspend status of the SparkApplication
+func (w *SparkApplicationWrapper) Suspend(s bool) *SparkApplicationWrapper {
+	w.Spec.Suspend = s
+	return w
+}
+
+// Queue updates the queue name of the SparkApplication
+func (w *SparkApplicationWrapper) Queue(queue string) *SparkApplicationWrapper {
+	if w.Labels == nil {
+		w.Labels = make(map[string]string)
+	}
+	w.Labels[constants.QueueLabel] = queue
+	return w
+}
+
+// ExecutorInstances updates the number of executor instances of the SparkApplication
+func (w *SparkApplicationWrapper) ExecutorInstances(n int32) *SparkApplicationWrapper {
+	w.Spec.Executor.Instances = &n
+	return w
+}
+
+// DynamicAllocation explicitly sets whether dynamic allocation is enabled for the SparkApplication
+func (w *SparkApplicationWrapper) DynamicAllocation(enabled bool) *SparkApplicationWrapper {
+	w.Spec.DynamicAllocation = &kfsparkapi.DynamicAllocation{Enabled: enabled}
+	return w
+}
+
+// DynamicAllocationMaxExecutor explicitly sets spec.dynamicAllocation.maxExecutors of the SparkApplication
+func (w *SparkApplicationWrapper) DynamicAllocationMaxExecutor(num int32) *SparkApplicationWrapper {
+	if w.Spec.DynamicAllocation == nil {
+		w.Spec.DynamicAllocation = &kfsparkapi.DynamicAllocation{}
+	}
+	w.Spec.DynamicAllocation.MaxExecutors = &num
+	return w
+}
+
+// Label sets the label key and value
+func (w *SparkApplicationWrapper) Label(key, value string) *SparkApplicationWrapper {
+	if w.Labels == nil {
+		w.Labels = make(map[string]string)
+	}
+	w.Labels[key] = value
+	return w
+}
+
+// WorkloadPriorityClass updates the workload priority class label of the SparkApplication.
+func (w *SparkApplicationWrapper) WorkloadPriorityClass(wpc string) *SparkApplicationWrapper {
+	if w.Labels == nil {
+		w.Labels = make(map[string]string)
+	}
+	w.Labels[constants.WorkloadPriorityClassLabel] = wpc
+	return w
+}
+
+// DriverAnnotation sets the annotation on Spec.Driver.Annotations
+func (w *SparkApplicationWrapper) DriverAnnotation(key, value string) *SparkApplicationWrapper {
+	if w.Spec.Driver.Annotations == nil {
+		w.Spec.Driver.Annotations = make(map[string]string, 1)
+	}
+	w.Spec.Driver.Annotations[key] = value
+	return w
+}
+
+// ExecutorAnnotation sets the annotation on Spec.Executor.Annotations
+func (w *SparkApplicationWrapper) ExecutorAnnotation(key, value string) *SparkApplicationWrapper {
+	if w.Spec.Executor.Annotations == nil {
+		w.Spec.Executor.Annotations = make(map[string]string, 1)
+	}
+	w.Spec.Executor.Annotations[key] = value
+	return w
+}
+
+// DriverLabel sets the label on Spec.Driver.Labels
+func (w *SparkApplicationWrapper) DriverLabel(key, value string) *SparkApplicationWrapper {
+	if w.Spec.Driver.Labels == nil {
+		w.Spec.Driver.Labels = make(map[string]string, 1)
+	}
+	w.Spec.Driver.Labels[key] = value
+	return w
+}
+
+// ExecutorLabel sets the label on Spec.Executor.Labels
+func (w *SparkApplicationWrapper) ExecutorLabel(key, value string) *SparkApplicationWrapper {
+	if w.Spec.Executor.Labels == nil {
+		w.Spec.Executor.Labels = make(map[string]string, 1)
+	}
+	w.Spec.Executor.Labels[key] = value
+	return w
+}
+
+// CoreLimit sets the driver or executor spec.CoreLimit.
+func (w *SparkApplicationWrapper) CoreLimit(role string, quantity *string) *SparkApplicationWrapper { + switch role { + case kfsparkcommon.SparkRoleDriver: + w.Spec.Driver.CoreLimit = quantity + case kfsparkcommon.SparkRoleExecutor: + w.Spec.Executor.CoreLimit = quantity + } + return w +} + +// Clone clones the SparkApplicationWrapper +func (w *SparkApplicationWrapper) Clone() *SparkApplicationWrapper { + return &SparkApplicationWrapper{SparkApplication: *w.DeepCopy()} +} diff --git a/vendor/github.com/chai2010/gettext-go/README.md b/vendor/github.com/chai2010/gettext-go/README.md index 9381bd1522..d766fab4d7 100644 --- a/vendor/github.com/chai2010/gettext-go/README.md +++ b/vendor/github.com/chai2010/gettext-go/README.md @@ -1,6 +1,5 @@ -- *赞助 BTC: 1Cbd6oGAUUyBi7X7MaR4np4nTmQZXVgkCW* -- *赞助 ETH: 0x623A3C3a72186A6336C79b18Ac1eD36e1c71A8a6* -- *Go语言付费QQ群: 1055927514* +- *Go语言QQ群: 102319854, 1055927514* +- *凹语言(凹读音“Wa”)(The Wa Programming Language): https://github.com/wa-lang/wa* ---- diff --git a/vendor/github.com/chai2010/gettext-go/plural/formula.go b/vendor/github.com/chai2010/gettext-go/plural/formula.go index 679a1cd50d..c92dc19ead 100644 --- a/vendor/github.com/chai2010/gettext-go/plural/formula.go +++ b/vendor/github.com/chai2010/gettext-go/plural/formula.go @@ -47,7 +47,7 @@ var formulaTable = map[string]func(n int) int{ return 0 }, fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int { - if n <= 1 { + if n == 1 { return 0 } return 1 diff --git a/vendor/github.com/chai2010/gettext-go/po/util.go b/vendor/github.com/chai2010/gettext-go/po/util.go index d8b3b0e254..1a0928b111 100644 --- a/vendor/github.com/chai2010/gettext-go/po/util.go +++ b/vendor/github.com/chai2010/gettext-go/po/util.go @@ -29,6 +29,9 @@ func decodePoString(text string) string { break } switch line[i+1] { + case 'r': // \\r -> \r + data = append(data, '\r') + i++ case 'n': // \\n -> \n data = append(data, '\n') i++ @@ -62,6 +65,8 @@ func encodePoString(text string) string { buf.WriteString(`\\`) case '"': buf.WriteString(`\"`) + case '\r': + buf.WriteString(`\r`) case '\n': buf.WriteString(`\n`) case '\t': @@ -96,6 +101,8 @@ func encodeCommentPoString(text string) string { buf.WriteString(`\\`) case '"': buf.WriteString(`\"`) + case '\r': + buf.WriteString(`\r`) case '\n': buf.WriteString(`\n`) case '\t': diff --git a/vendor/github.com/go-errors/errors/.travis.yml b/vendor/github.com/go-errors/errors/.travis.yml index 77a6bccf77..1dc296026a 100644 --- a/vendor/github.com/go-errors/errors/.travis.yml +++ b/vendor/github.com/go-errors/errors/.travis.yml @@ -2,7 +2,6 @@ language: go go: - "1.8.x" - - "1.10.x" - - "1.13.x" - - "1.14.x" + - "1.11.x" - "1.16.x" + - "1.21.x" diff --git a/vendor/github.com/go-errors/errors/README.md b/vendor/github.com/go-errors/errors/README.md index 3d78525940..558bc883ef 100644 --- a/vendor/github.com/go-errors/errors/README.md +++ b/vendor/github.com/go-errors/errors/README.md @@ -9,7 +9,7 @@ This is particularly useful when you want to understand the state of execution when an error was returned unexpectedly. It provides the type \*Error which implements the standard golang error -interface, so you can use this library interchangably with code that is +interface, so you can use this library interchangeably with code that is expecting a normal error return. Usage @@ -80,3 +80,5 @@ This package is licensed under the MIT license, see LICENSE.MIT for details. 
* v1.4.0 *BREAKING* v1.4.0 reverted all changes from v1.3.0 and is identical to v1.2.0 * v1.4.1 no code change, but now without an unnecessary cover.out file. * v1.4.2 performance improvement to ErrorStack() to avoid unnecessary work https://github.com/go-errors/errors/pull/40 +* v1.5.0 add errors.Join() and errors.Unwrap() copying the stdlib https://github.com/go-errors/errors/pull/40 +* v1.5.1 fix build on go1.13..go1.19 (broken by adding Join and Unwrap with wrong build constraints) diff --git a/vendor/github.com/go-errors/errors/error_1_13.go b/vendor/github.com/go-errors/errors/error_1_13.go index 0af2fc8065..34ab3e00eb 100644 --- a/vendor/github.com/go-errors/errors/error_1_13.go +++ b/vendor/github.com/go-errors/errors/error_1_13.go @@ -1,3 +1,4 @@ +//go:build go1.13 // +build go1.13 package errors @@ -6,14 +7,17 @@ import ( baseErrors "errors" ) -// find error in any wrapped error +// As finds the first error in err's tree that matches target, and if one is found, sets +// target to that error value and returns true. Otherwise, it returns false. +// +// For more information see stdlib errors.As. func As(err error, target interface{}) bool { return baseErrors.As(err, target) } // Is detects whether the error is equal to a given error. Errors // are considered equal by this function if they are matched by errors.Is -// or if their contained errors are matched through errors.Is +// or if their contained errors are matched through errors.Is. func Is(e error, original error) bool { if baseErrors.Is(e, original) { return true diff --git a/vendor/github.com/go-errors/errors/error_backward.go b/vendor/github.com/go-errors/errors/error_backward.go index 80b0695e7e..ff14c4bfa2 100644 --- a/vendor/github.com/go-errors/errors/error_backward.go +++ b/vendor/github.com/go-errors/errors/error_backward.go @@ -1,3 +1,4 @@ +//go:build !go1.13 // +build !go1.13 package errors @@ -55,3 +56,70 @@ func Is(e error, original error) bool { return false } + +// Disclaimer: functions Join and Unwrap are copied from the stdlib errors +// package v1.21.0. + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + var b []byte + for i, err := range e.errs { + if i > 0 { + b = append(b, '\n') + } + b = append(b, err.Error()...) + } + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +// +// Unwrap only calls a method of the form "Unwrap() error". +// In particular Unwrap does not unwrap errors returned by [Join]. 
+func Unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/github.com/go-errors/errors/join_unwrap_1_20.go b/vendor/github.com/go-errors/errors/join_unwrap_1_20.go new file mode 100644 index 0000000000..44df35ece9 --- /dev/null +++ b/vendor/github.com/go-errors/errors/join_unwrap_1_20.go @@ -0,0 +1,32 @@ +//go:build go1.20 +// +build go1.20 + +package errors + +import baseErrors "errors" + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +// +// For more information see stdlib errors.Join. +func Join(errs ...error) error { + return baseErrors.Join(errs...) +} + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +// +// Unwrap only calls a method of the form "Unwrap() error". +// In particular Unwrap does not unwrap errors returned by [Join]. +// +// For more information see stdlib errors.Unwrap. +func Unwrap(err error) error { + return baseErrors.Unwrap(err) +} diff --git a/vendor/github.com/go-errors/errors/join_unwrap_backward.go b/vendor/github.com/go-errors/errors/join_unwrap_backward.go new file mode 100644 index 0000000000..50c766976c --- /dev/null +++ b/vendor/github.com/go-errors/errors/join_unwrap_backward.go @@ -0,0 +1,71 @@ +//go:build !go1.20 +// +build !go1.20 + +package errors + +// Disclaimer: functions Join and Unwrap are copied from the stdlib errors +// package v1.21.0. + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + var b []byte + for i, err := range e.errs { + if i > 0 { + b = append(b, '\n') + } + b = append(b, err.Error()...) + } + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +// +// Unwrap only calls a method of the form "Unwrap() error". +// In particular Unwrap does not unwrap errors returned by [Join]. 
+func Unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. 
Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. 
+ +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/glog/README.md b/vendor/github.com/golang/glog/README.md new file mode 100644 index 0000000000..a4f73883b2 --- /dev/null +++ b/vendor/github.com/golang/glog/README.md @@ -0,0 +1,36 @@ +# glog + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/golang/glog)](https://pkg.go.dev/github.com/golang/glog) + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package [_glog_](https://github.com/google/glog). + +By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the `-vmodule` flag, the package also provides fine-grained +control over logging at the file level. + +The comment from `glog.go` introduces the ideas: + +Package _glog_ implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides the functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style loggingcontrolled by the `-v` and `-vmodule=file=2` flags. + +Basic examples: + +```go +glog.Info("Prepare to repel boarders") + +glog.Fatalf("Initialization failed: %s", err) +``` + +See the documentation for the V function for an explanation of these examples: + +```go +if glog.V(2) { + glog.Info("Starting transaction...") +} +glog.V(2).Infoln("Processed", nItems, "elements") +``` + +The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go new file mode 100644 index 0000000000..8c00e737a0 --- /dev/null +++ b/vendor/github.com/golang/glog/glog.go @@ -0,0 +1,777 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions that have a name matched by regex: +// +// (Info|Warning|Error|Fatal)(Context)?(Depth)?(f)? +// +// If Context is present, function takes context.Context argument. The +// context is used to pass through the Trace Context to log sinks that can make use +// of it. 
+// It is recommended to use the context variant of the functions over the non-context +// variants if a context is available to make sure the Trace Contexts are present +// in logs. +// +// If Depth is present, this function calls log from a different depth in the call stack. +// This enables a callee to emit logs that use the callsite information of its caller +// or any other callers in the stack. When depth == 0, the original callee's line +// information is emitted. When depth > 0, depth frames are skipped in the call stack +// and the final frame is treated like the original callee to Info. +// +// If 'f' is present, function formats according to a format specifier. +// +// This package also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// A comma-separated list of file and line numbers holding a logging +// statement, such as +// -log_backtrace_at=gopherflakes.go:234 +// A stack trace will be written to the Info log whenever execution +// hits one of these statements. (Unlike with -vmodule, the ".go" +// must bepresent.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin with "gopher", +// and +// -vmodule=/path/to/glog/glog_test=1 +// sets the V level to 1 in the Go file /path/to/glog/glog_test.go. +// If a glob pattern contains a slash, it is matched against the full path, +// and the file name. Otherwise, the pattern is +// matched only against the file's basename. When both -vmodule and -v +// are specified, the -vmodule values take precedence for the specified +// modules. +package glog + +// This file contains the parts of the log package that are shared among all +// implementations (file, envelope, and appengine). + +import ( + "bytes" + "context" + "errors" + "fmt" + stdLog "log" + "os" + "reflect" + "runtime" + "runtime/pprof" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/glog/internal/logsink" + "github.com/golang/glog/internal/stackdump" +) + +var timeNow = time.Now // Stubbed out for testing. + +// MaxSize is the maximum size of a log file in bytes. 
+var MaxSize uint64 = 1024 * 1024 * 1800 + +// ErrNoLog is the error we return if no log file has yet been created +// for the specified log type. +var ErrNoLog = errors.New("log file not yet created") + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [...]*OutputStats{ + logsink.Info: &Stats.Info, + logsink.Warning: &Stats.Warning, + logsink.Error: &Stats.Error, + logsink.Fatal: nil, +} + +// Level specifies a level of verbosity for V logs. The -v flag is of type +// Level and should be modified only through the flag.Value interface. +type Level int32 + +var metaPool sync.Pool // Pool of *logsink.Meta. + +// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a +// pointer, allocating a new one if necessary. (Returning the interface value +// directly avoids an allocation if there was an existing pointer in the pool.) +func metaPoolGet() (any, *logsink.Meta) { + if metai := metaPool.Get(); metai != nil { + return metai, metai.(*logsink.Meta) + } + meta := new(logsink.Meta) + return meta, meta +} + +type stack bool + +const ( + noStack = stack(false) + withStack = stack(true) +) + +func appendBacktrace(depth int, format string, args []any) (string, []any) { + // Capture a backtrace as a stackdump.Stack (both text and PC slice). + // Structured log sinks can extract the backtrace in whichever format they + // prefer (PCs or text), and Text sinks will include it as just another part + // of the log message. + // + // Use depth instead of depth+1 so that the backtrace always includes the + // log function itself - otherwise the reason for the trace appearing in the + // log may not be obvious to the reader. + dump := stackdump.Caller(depth) + + // Add an arg and an entry in the format string for the stack dump. + // + // Copy the "args" slice to avoid a rare but serious aliasing bug + // (corrupting the caller's slice if they passed it to a non-Fatal call + // using "..."). + format = format + "\n\n%v\n" + args = append(append([]any(nil), args...), dump) + + return format, args +} + +// logf acts as ctxlogf, but doesn't expect a context. +func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { + ctxlogf(nil, depth+1, severity, verbose, stack, format, args...) +} + +// ctxlogf writes a log message for a log function call (or log function wrapper) +// at the given depth in the current goroutine's stack. +func ctxlogf(ctx context.Context, depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { + now := timeNow() + _, file, line, ok := runtime.Caller(depth + 1) + if !ok { + file = "???" + line = 1 + } + + if stack == withStack || backtraceAt(file, line) { + format, args = appendBacktrace(depth+1, format, args) + } + + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Context: ctx, + Time: now, + File: file, + Line: line, + Depth: depth + 1, + Severity: severity, + Verbose: verbose, + Thread: int64(pid), + } + sinkf(meta, format, args...) 
+ // Clear pointer fields so they can be garbage collected early. + meta.Context = nil + meta.Stack = nil + metaPool.Put(metai) +} + +func sinkf(meta *logsink.Meta, format string, args ...any) { + meta.Depth++ + n, err := logsink.Printf(meta, format, args...) + if stats := severityStats[meta.Severity]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(n)) + } + + if err != nil { + logsink.Printf(meta, "glog: exiting because of error: %s", err) + sinks.file.Flush() + os.Exit(2) + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// NewStandardLogger returns a Logger that writes to the Google logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge logsink.Severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + + // The depth below hard-codes details of how stdlog gets here. The alternative would be to walk + // up the stack looking for src/log/log.go but that seems like it would be + // unfortunately slow. + const stdLogDepth = 4 + + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: timeNow(), + File: file, + Line: line, + Depth: stdLogDepth, + Severity: logsink.Severity(lb), + Thread: int64(pid), + } + + format := "%s" + args := []any{text} + if backtraceAt(file, line) { + format, args = appendBacktrace(meta.Depth, format, args) + } + + sinkf(meta, format, args...) + metaPool.Put(metai) + + return len(b), nil +} + +// defaultFormat returns a fmt.Printf format specifier that formats its +// arguments as if they were passed to fmt.Print. +func defaultFormat(args []any) string { + n := len(args) + switch n { + case 0: + return "" + case 1: + return "%v" + } + + b := make([]byte, 0, n*3-1) + wasString := true // Suppress leading space. 
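+	// Match fmt.Print's spacing rule: a space separates operands only when
+	// neither neighbor is a string.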
+ for _, arg := range args { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + if wasString || isString { + b = append(b, "%v"...) + } else { + b = append(b, " %v"...) + } + wasString = isString + } + return string(b) +} + +// lnFormat returns a fmt.Printf format specifier that formats its arguments +// as if they were passed to fmt.Println. +func lnFormat(args []any) string { + if len(args) == 0 { + return "\n" + } + + b := make([]byte, 0, len(args)*3) + for range args { + b = append(b, "%v "...) + } + b[len(b)-1] = '\n' // Replace the last space with a newline. + return string(b) +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// +// if glog.V(2) { glog.Info("log this") } +// +// or +// +// glog.V(2).Info("log this") +// +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at most the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + return VDepth(1, level) +} + +// VDepth acts as V but uses depth to determine which call frame to check vmodule for. +// VDepth(0, level) is the same as V(level). +func VDepth(depth int, level Level) Verbose { + return Verbose(verboseEnabled(depth+1, level)) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...any) { + v.InfoDepth(1, args...) +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...any) { + if v { + logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) + } +} + +// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepthf(depth int, format string, args ...any) { + if v { + logf(depth+1, logsink.Info, true, noStack, format, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...any) { + if v { + logf(1, logsink.Info, true, noStack, lnFormat(args), args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...any) { + if v { + logf(1, logsink.Info, true, noStack, format, args...) + } +} + +// InfoContext is equivalent to the global InfoContext function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoContext(ctx context.Context, args ...any) { + v.InfoContextDepth(ctx, 1, args...) +} + +// InfoContextf is equivalent to the global InfoContextf function, guarded by the value of v. +// See the documentation of V for usage. 
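+// For example (illustrative):
+//
+//	glog.V(2).InfoContextf(ctx, "processed %d items", n)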
+func (v Verbose) InfoContextf(ctx context.Context, format string, args ...any) { + if v { + ctxlogf(ctx, 1, logsink.Info, true, noStack, format, args...) + } +} + +// InfoContextDepth is equivalent to the global InfoContextDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoContextDepth(ctx context.Context, depth int, args ...any) { + if v { + ctxlogf(ctx, depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) + } +} + +// InfoContextDepthf is equivalent to the global InfoContextDepthf function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) { + if v { + ctxlogf(ctx, depth+1, logsink.Info, true, noStack, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...any) { + InfoDepth(1, args...) +} + +// InfoDepth calls Info from a different depth in the call stack. +// This enables a callee to emit logs that use the callsite information of its caller +// or any other callers in the stack. When depth == 0, the original callee's line +// information is emitted. When depth > 0, depth frames are skipped in the call stack +// and the final frame is treated like the original callee to Info. +func InfoDepth(depth int, args ...any) { + logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) +} + +// InfoDepthf acts as InfoDepth but with format string. +func InfoDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Info, false, noStack, format, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...any) { + logf(1, logsink.Info, false, noStack, lnFormat(args), args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...any) { + logf(1, logsink.Info, false, noStack, format, args...) +} + +// InfoContext is like [Info], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContext(ctx context.Context, args ...any) { + InfoContextDepth(ctx, 1, args...) +} + +// InfoContextf is like [Infof], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContextf(ctx context.Context, format string, args ...any) { + ctxlogf(ctx, 1, logsink.Info, false, noStack, format, args...) +} + +// InfoContextDepth is like [InfoDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContextDepth(ctx context.Context, depth int, args ...any) { + ctxlogf(ctx, depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) +} + +// InfoContextDepthf is like [InfoDepthf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Info, false, noStack, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...any) { + WarningDepth(1, args...) 
+} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) +} + +// WarningDepthf acts as Warningf but uses depth to determine which call frame to log. +// WarningDepthf(0, "msg") is the same as Warningf("msg"). +func WarningDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, format, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...any) { + logf(1, logsink.Warning, false, noStack, lnFormat(args), args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...any) { + logf(1, logsink.Warning, false, noStack, format, args...) +} + +// WarningContext is like [Warning], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContext(ctx context.Context, args ...any) { + WarningContextDepth(ctx, 1, args...) +} + +// WarningContextf is like [Warningf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContextf(ctx context.Context, format string, args ...any) { + ctxlogf(ctx, 1, logsink.Warning, false, noStack, format, args...) +} + +// WarningContextDepth is like [WarningDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContextDepth(ctx context.Context, depth int, args ...any) { + ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) +} + +// WarningContextDepthf is like [WarningDepthf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...any) { + ErrorDepth(1, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...any) { + logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) +} + +// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log. +// ErrorDepthf(0, "msg") is the same as Errorf("msg"). +func ErrorDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Error, false, noStack, format, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...any) { + logf(1, logsink.Error, false, noStack, lnFormat(args), args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...any) { + logf(1, logsink.Error, false, noStack, format, args...) +} + +// ErrorContext is like [Error], but with an extra [context.Context] parameter. 
The +// context is used to pass the Trace Context to log sinks. +func ErrorContext(ctx context.Context, args ...any) { + ErrorContextDepth(ctx, 1, args...) +} + +// ErrorContextf is like [Errorf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ErrorContextf(ctx context.Context, format string, args ...any) { + ctxlogf(ctx, 1, logsink.Error, false, noStack, format, args...) +} + +// ErrorContextDepth is like [ErrorDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ErrorContextDepth(ctx context.Context, depth int, args ...any) { + ctxlogf(ctx, depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) +} + +// ErrorContextDepthf is like [ErrorDepthf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ErrorContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Error, false, noStack, format, args...) +} + +func ctxfatalf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Fatal, false, withStack, format, args...) + sinks.file.Flush() + + err := abortProcess() // Should not return. + + // Failed to abort the process using signals. Dump a stack trace and exit. + Errorf("abortProcess returned unexpectedly: %v", err) + sinks.file.Flush() + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + os.Exit(2) // Exit with the same code as the default SIGABRT handler. +} + +func fatalf(depth int, format string, args ...any) { + ctxfatalf(nil, depth+1, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(2). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...any) { + FatalDepth(1, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...any) { + fatalf(depth+1, defaultFormat(args), args...) +} + +// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log. +// FatalDepthf(0, "msg") is the same as Fatalf("msg"). +func FatalDepthf(depth int, format string, args ...any) { + fatalf(depth+1, format, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(2). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...any) { + fatalf(1, lnFormat(args), args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(2). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...any) { + fatalf(1, format, args...) +} + +// FatalContext is like [Fatal], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func FatalContext(ctx context.Context, args ...any) { + FatalContextDepth(ctx, 1, args...) +} + +// FatalContextf is like [Fatalf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func FatalContextf(ctx context.Context, format string, args ...any) { + ctxfatalf(ctx, 1, format, args...) 
+}
+
+// FatalContextDepth is like [FatalDepth], but with an extra [context.Context] parameter. The
+// context is used to pass the Trace Context to log sinks.
+func FatalContextDepth(ctx context.Context, depth int, args ...any) {
+	ctxfatalf(ctx, depth+1, defaultFormat(args), args...)
+}
+
+// FatalContextDepthf is like [FatalDepthf], but with an extra [context.Context] parameter. The
+// context is used to pass the Trace Context to log sinks.
+func FatalContextDepthf(ctx context.Context, depth int, format string, args ...any) {
+	ctxfatalf(ctx, depth+1, format, args...)
+}
+
+func ctxexitf(ctx context.Context, depth int, format string, args ...any) {
+	ctxlogf(ctx, depth+1, logsink.Fatal, false, noStack, format, args...)
+	sinks.file.Flush()
+	os.Exit(1)
+}
+
+func exitf(depth int, format string, args ...any) {
+	ctxexitf(nil, depth+1, format, args...)
+}
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...any) {
+	ExitDepth(1, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...any) {
+	exitf(depth+1, defaultFormat(args), args...)
+}
+
+// ExitDepthf acts as Exitf but uses depth to determine which call frame to log.
+// ExitDepthf(0, "msg") is the same as Exitf("msg").
+func ExitDepthf(depth int, format string, args ...any) {
+	exitf(depth+1, format, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Exitln(args ...any) {
+	exitf(1, lnFormat(args), args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...any) {
+	exitf(1, format, args...)
+}
+
+// ExitContext is like [Exit], but with an extra [context.Context] parameter. The
+// context is used to pass the Trace Context to log sinks.
+func ExitContext(ctx context.Context, args ...any) {
+	ExitContextDepth(ctx, 1, args...)
+}
+
+// ExitContextf is like [Exitf], but with an extra [context.Context] parameter. The
+// context is used to pass the Trace Context to log sinks.
+func ExitContextf(ctx context.Context, format string, args ...any) {
+	ctxexitf(ctx, 1, format, args...)
+}
+
+// ExitContextDepth is like [ExitDepth], but with an extra [context.Context] parameter. The
+// context is used to pass the Trace Context to log sinks.
+func ExitContextDepth(ctx context.Context, depth int, args ...any) {
+	ctxexitf(ctx, depth+1, defaultFormat(args), args...)
+}
+
+// ExitContextDepthf is like [ExitDepthf], but with an extra [context.Context] parameter. The
+// context is used to pass the Trace Context to log sinks.
+func ExitContextDepthf(ctx context.Context, depth int, format string, args ...any) {
+	ctxexitf(ctx, depth+1, format, args...)
+}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
new file mode 100644
index 0000000000..8eb8b08c60
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file.go
@@ -0,0 +1,418 @@
+// Go support for leveled logs, analogous to https://github.com/google/glog.
+//
+// Copyright 2023 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/golang/glog/internal/logsink" +) + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +var ( + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files") + logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+ + " (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.") +) + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + if u := lookupUser(); u != "" { + userName = u + } + // Sanitize userName since it is used to construct file paths. + userName = strings.Map(func(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + case r >= 'A' && r <= 'Z': + case r >= '0' && r <= '9': + default: + return '_' + } + return r + }, userName) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. 
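+//
+// Given the logName format above, a file created for tag "INFO" might be
+// named, for instance (hypothetical values):
+//
+//	myprog.host1.alice.log.INFO.20250102-150405.4321
+//
+// with a per-directory "myprog.INFO" symlink pointing at it.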
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+	onceLogDirs.Do(createLogDirs)
+	if len(logDirs) == 0 {
+		return nil, "", errors.New("log: no log dirs")
+	}
+	name, link := logName(tag, t)
+	var lastErr error
+	for _, dir := range logDirs {
+		fname := filepath.Join(dir, name)
+		f, err := os.Create(fname)
+		if err == nil {
+			symlink := filepath.Join(dir, link)
+			os.Remove(symlink)        // ignore err
+			os.Symlink(name, symlink) // ignore err
+			if *logLink != "" {
+				lsymlink := filepath.Join(*logLink, link)
+				os.Remove(lsymlink)         // ignore err
+				os.Symlink(fname, lsymlink) // ignore err
+			}
+			return f, fname, nil
+		}
+		lastErr = err
+	}
+	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+	Flush() error
+	Sync() error
+	io.Writer
+	filenames() []string
+}
+
+var sinks struct {
+	stderr stderrSink
+	file   fileSink
+}
+
+func init() {
+	// Register stderr first: that way if we crash during file-writing at least
+	// the log will have gone somewhere.
+	logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file)
+
+	sinks.file.flushChan = make(chan logsink.Severity, 1)
+	go sinks.file.flushDaemon()
+}
+
+// stderrSink is a logsink.Text that writes log entries to stderr
+// if they meet certain conditions.
+type stderrSink struct {
+	mu sync.Mutex
+	w  io.Writer // if nil Emit uses os.Stderr directly
+}
+
+// Enabled implements logsink.Text.Enabled. It returns true if the -logtostderr
+// or -alsologtostderr flag is set, or if the message severity is at or above
+// the -stderrthreshold.
+func (s *stderrSink) Enabled(m *logsink.Meta) bool {
+	return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get()
+}
+
+// Emit implements logsink.Text.Emit.
+func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	w := s.w
+	if w == nil {
+		w = os.Stderr
+	}
+	dn, err := w.Write(data)
+	n += dn
+	return n, err
+}
+
+// severityWriters is an array of flushSyncWriter with a value for each
+// logsink.Severity.
+type severityWriters [4]flushSyncWriter
+
+// fileSink is a logsink.Text that prints to a set of Google log files.
+type fileSink struct {
+	mu sync.Mutex
+	// file holds writer for each of the log types.
+	file      severityWriters
+	flushChan chan logsink.Severity
+}
+
+// Enabled implements logsink.Text.Enabled. It returns true unless the
+// -logtostderr flag is set, in which case output goes only to stderr.
+func (s *fileSink) Enabled(m *logsink.Meta) bool {
+	return !toStderr
+}
+
+// Emit implements logsink.Text.Emit.
+func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if err = s.createMissingFiles(m.Severity); err != nil {
+		return 0, err
+	}
+	for sev := m.Severity; sev >= logsink.Info; sev-- {
+		if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil {
+			err = fErr // Take the first error.
+		}
+	}
+	n = len(data)
+	if int(m.Severity) > *logBufLevel {
+		select {
+		case s.flushChan <- m.Severity:
+		default:
+		}
+	}
+
+	return n, err
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
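+// (bufio.Writer and os.File both define a Write method, hence the conflict.)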
+// s.mu is held for all its methods. +type syncBuffer struct { + sink *fileSink + *bufio.Writer + file *os.File + names []string + sev logsink.Severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + return 0, err + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + return n, err +} + +func (sb *syncBuffer) filenames() []string { + return sb.names +} + +const footer = "\nCONTINUED IN NEXT FILE\n" + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + var err error + pn := "" + file, name, err := create(sb.sev.String(), now) + + if sb.file != nil { + // The current log file becomes the previous log at the end of + // this block, so save its name for use in the header of the next + // file. + pn = sb.file.Name() + sb.Flush() + // If there's an existing file, write a footer with the name of + // the next file in the chain, followed by the constant string + // \nCONTINUED IN NEXT FILE\n to make continuation detection simple. + sb.file.Write([]byte("Next log: ")) + sb.file.Write([]byte(name)) + sb.file.Write([]byte(footer)) + sb.file.Close() + } + + sb.file = file + sb.names = append(sb.names, name) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Previous log: %s\n", pn) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createMissingFiles creates all the log files for severity from infoLog up to +// upTo that have not already been created. +// s.mu is held. +func (s *fileSink) createMissingFiles(upTo logsink.Severity) error { + if s.file[upTo] != nil { + return nil + } + now := time.Now() + // Files are created in increasing severity order, so we can be assured that + // if a high severity logfile exists, then so do all of lower severity. + for sev := logsink.Info; sev <= upTo; sev++ { + if s.file[sev] != nil { + continue + } + sb := &syncBuffer{ + sink: s, + sev: sev, + } + if err := sb.rotateFile(now); err != nil { + return err + } + s.file[sev] = sb + } + return nil +} + +// flushDaemon periodically flushes the log file buffers. +func (s *fileSink) flushDaemon() { + tick := time.NewTicker(30 * time.Second) + defer tick.Stop() + for { + select { + case <-tick.C: + s.Flush() + case sev := <-s.flushChan: + s.flush(sev) + } + } +} + +// Flush flushes all pending log I/O. +func Flush() { + sinks.file.Flush() +} + +// Flush flushes all the logs and attempts to "sync" their data to disk. +func (s *fileSink) Flush() error { + return s.flush(logsink.Info) +} + +// flush flushes all logs of severity threshold or greater. 
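+// For example, flush(logsink.Warning) flushes the WARNING, ERROR, and FATAL
+// files while leaving INFO buffered.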
+func (s *fileSink) flush(threshold logsink.Severity) error { + var firstErr error + updateErr := func(err error) { + if err != nil && firstErr == nil { + firstErr = err + } + } + + // Remember where we flushed, so we can call sync without holding + // the lock. + var files []flushSyncWriter + func() { + s.mu.Lock() + defer s.mu.Unlock() + // Flush from fatal down, in case there's trouble flushing. + for sev := logsink.Fatal; sev >= threshold; sev-- { + if file := s.file[sev]; file != nil { + updateErr(file.Flush()) + files = append(files, file) + } + } + }() + + for _, file := range files { + updateErr(file.Sync()) + } + + return firstErr +} + +// Names returns the names of the log files holding the FATAL, ERROR, +// WARNING, or INFO logs. Returns ErrNoLog if the log for the given +// level doesn't exist (e.g. because no messages of that level have been +// written). This may return multiple names if the log type requested +// has rolled over. +func Names(s string) ([]string, error) { + severity, err := logsink.ParseSeverity(s) + if err != nil { + return nil, err + } + + sinks.file.mu.Lock() + defer sinks.file.mu.Unlock() + f := sinks.file.file[severity] + if f == nil { + return nil, ErrNoLog + } + + return f.filenames(), nil +} diff --git a/vendor/github.com/golang/glog/glog_file_linux.go b/vendor/github.com/golang/glog/glog_file_linux.go new file mode 100644 index 0000000000..d795092d03 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_linux.go @@ -0,0 +1,39 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux + +package glog + +import ( + "errors" + "runtime" + "syscall" +) + +// abortProcess attempts to kill the current process in a way that will dump the +// currently-running goroutines someplace useful (like stderr). +// +// It does this by sending SIGABRT to the current thread. +// +// If successful, abortProcess does not return. 
+func abortProcess() error {
+	runtime.LockOSThread()
+	if err := syscall.Tgkill(syscall.Getpid(), syscall.Gettid(), syscall.SIGABRT); err != nil {
+		return err
+	}
+	return errors.New("log: killed current thread with SIGABRT, but still running")
+}
diff --git a/vendor/github.com/golang/glog/glog_file_nonwindows.go b/vendor/github.com/golang/glog/glog_file_nonwindows.go
new file mode 100644
index 0000000000..d5cdb793c5
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file_nonwindows.go
@@ -0,0 +1,12 @@
+//go:build !windows
+
+package glog
+
+import "os/user"
+
+func lookupUser() string {
+	if current, err := user.Current(); err == nil {
+		return current.Username
+	}
+	return ""
+}
diff --git a/vendor/github.com/golang/glog/glog_file_other.go b/vendor/github.com/golang/glog/glog_file_other.go
new file mode 100644
index 0000000000..9540f14fc0
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file_other.go
@@ -0,0 +1,30 @@
+// Go support for leveled logs, analogous to https://github.com/google/glog.
+//
+// Copyright 2023 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !(unix || windows)
+
+package glog
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// abortProcess returns an error on platforms that presumably don't support signals.
+func abortProcess() error {
+	return fmt.Errorf("not sending SIGABRT (%s/%s does not support signals), falling back", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/golang/glog/glog_file_posix.go b/vendor/github.com/golang/glog/glog_file_posix.go
new file mode 100644
index 0000000000..c27c7c0e4a
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file_posix.go
@@ -0,0 +1,53 @@
+// Go support for leveled logs, analogous to https://github.com/google/glog.
+//
+// Copyright 2023 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (unix || windows) && !linux
+
+package glog
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+// abortProcess attempts to kill the current process in a way that will dump the
+// currently-running goroutines someplace useful (like stderr).
+//
+// It does this by sending SIGABRT to the current process. Unfortunately, the
+// signal may or may not be delivered to the current thread; in order to do that
+// portably, we would need to add a cgo dependency and call pthread_kill.
+//
+// If successful, abortProcess does not return.
+func abortProcess() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + if err := p.Signal(syscall.SIGABRT); err != nil { + return err + } + + // Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to + // run (and eventually terminate the process themselves). + // + // We could just "select{}" here, but there's an outside chance that would + // trigger the runtime's deadlock detector if there happen not to be any + // background goroutines running. So we'll sleep a while first to give + // the signal some time. + time.Sleep(10 * time.Second) + select {} +} diff --git a/vendor/github.com/golang/glog/glog_file_windows.go b/vendor/github.com/golang/glog/glog_file_windows.go new file mode 100644 index 0000000000..a9e4f609df --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_windows.go @@ -0,0 +1,30 @@ +//go:build windows + +package glog + +import ( + "syscall" +) + +// This follows the logic in the standard library's user.Current() function, except +// that it leaves out the potentially expensive calls required to look up the user's +// display name in Active Directory. +func lookupUser() string { + token, err := syscall.OpenCurrentProcessToken() + if err != nil { + return "" + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "" + } + username, _, accountType, err := tokenUser.User.Sid.LookupAccount("") + if err != nil { + return "" + } + if accountType != syscall.SidTypeUser { + return "" + } + return username +} diff --git a/vendor/github.com/golang/glog/glog_flags.go b/vendor/github.com/golang/glog/glog_flags.go new file mode 100644 index 0000000000..fa4371afd3 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_flags.go @@ -0,0 +1,398 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/golang/glog/internal/logsink" +) + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + full bool // The pattern wants to match the full path + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(full, file string) bool { + if m.literal { + if m.full { + return full == m.pattern + } + return file == m.pattern + } + if m.full { + match, _ := filepath.Match(m.pattern, full) + return match + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. 
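+// For example, "gopher" is literal while "gopher*" is not.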
+func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// isFull reports whether the pattern matches the full file path, that is, +// whether it contains /. +func isFull(pattern string) bool { + return strings.ContainsRune(pattern, '/') +} + +// verboseFlags represents the setting of the -v and -vmodule flags. +type verboseFlags struct { + // moduleLevelCache is a sync.Map storing the -vmodule Level for each V() + // call site, identified by PC. If there is no matching -vmodule filter, + // the cached value is exactly v. moduleLevelCache is replaced with a new + // Map whenever the -vmodule or -v flag changes state. + moduleLevelCache atomic.Value + + // mu guards all fields below. + mu sync.Mutex + + // v stores the value of the -v flag. It may be read safely using + // sync.LoadInt32, but is only modified under mu. + v Level + + // module stores the parsed -vmodule flag. + module []modulePat + + // moduleLength caches len(module). If greater than zero, it + // means vmodule is enabled. It may be read safely using sync.LoadInt32, but + // is only modified under mu. + moduleLength int32 +} + +// NOTE: For compatibility with the open-sourced v1 version of this +// package (github.com/golang/glog) we need to retain that flag.Level +// implements the flag.Value interface. See also go/log-vs-glog. + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(l.Get().(Level)), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() any { + if l == &vflags.v { + // l is the value registered for the -v flag. + return Level(atomic.LoadInt32((*int32)(l))) + } + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + if l == &vflags.v { + // l is the value registered for the -v flag. + vflags.mu.Lock() + defer vflags.mu.Unlock() + vflags.moduleLevelCache.Store(&sync.Map{}) + atomic.StoreInt32((*int32)(l), int32(v)) + return nil + } + *l = Level(v) + return nil +} + +// vModuleFlag is the flag.Value for the --vmodule flag. +type vModuleFlag struct{ *verboseFlags } + +func (f vModuleFlag) String() string { + // Do not panic on the zero value. + // https://groups.google.com/g/golang-nuts/c/Atlr8uAjn6U/m/iId17Td5BQAJ. + if f.verboseFlags == nil { + return "" + } + f.mu.Lock() + defer f.mu.Unlock() + + var b bytes.Buffer + for i, f := range f.module { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get returns nil for this flag type since the struct is not exported. +func (f vModuleFlag) Get() any { return nil } + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3 +func (f vModuleFlag) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + // TODO: check syntax of filter? 
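+		// Record up front whether the pattern is literal and whether it should
+		// match the full path, so match can skip filepath.Match when possible.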
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)}) + } + + f.mu.Lock() + defer f.mu.Unlock() + f.module = filter + atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module))) + f.moduleLevelCache.Store(&sync.Map{}) + return nil +} + +func (f *verboseFlags) levelForPC(pc uintptr) Level { + if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok { + return level.(Level) + } + + f.mu.Lock() + defer f.mu.Unlock() + level := Level(f.v) + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d for + // regular matches, /a/b/c/d for full matches. + file = strings.TrimSuffix(file, ".go") + full := file + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range f.module { + if filter.match(full, file) { + level = filter.level + break // Use the first matching level. + } + } + f.moduleLevelCache.Load().(*sync.Map).Store(pc, level) + return level +} + +func (f *verboseFlags) enabled(callerDepth int, level Level) bool { + if atomic.LoadInt32(&f.moduleLength) == 0 { + // No vmodule values specified, so compare against v level. + return Level(atomic.LoadInt32((*int32)(&f.v))) >= level + } + + pcs := [1]uintptr{} + if runtime.Callers(callerDepth+2, pcs[:]) < 1 { + return false + } + frame, _ := runtime.CallersFrames(pcs[:]).Next() + return f.levelForPC(frame.Entry) >= level +} + +// traceLocation represents an entry in the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +func parseTraceLocation(value string) (traceLocation, error) { + fields := strings.Split(value, ":") + if len(fields) != 2 { + return traceLocation{}, errTraceSyntax + } + file, lineStr := fields[0], fields[1] + if !strings.Contains(file, ".") { + return traceLocation{}, errTraceSyntax + } + line, err := strconv.Atoi(lineStr) + if err != nil { + return traceLocation{}, errTraceSyntax + } + if line < 0 { + return traceLocation{}, errors.New("negative value for line") + } + return traceLocation{file, line}, nil +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +func (t traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t traceLocation) String() string { + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// traceLocations represents the -log_backtrace_at flag. +// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456 +// Note that unlike vmodule the file extension is included here. +type traceLocations struct { + mu sync.Mutex + locsLen int32 // Safe for atomic read without mu. 
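+	// locs holds the parsed file:line entries; guarded by mu.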
+ locs []traceLocation +} + +func (t *traceLocations) String() string { + t.mu.Lock() + defer t.mu.Unlock() + + var buf bytes.Buffer + for i, tl := range t.locs { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(tl.String()) + } + return buf.String() +} + +// Get always returns nil for this flag type since the struct is not exported +func (t *traceLocations) Get() any { return nil } + +func (t *traceLocations) Set(value string) error { + var locs []traceLocation + for _, s := range strings.Split(value, ",") { + if s == "" { + continue + } + loc, err := parseTraceLocation(s) + if err != nil { + return err + } + locs = append(locs, loc) + } + + t.mu.Lock() + defer t.mu.Unlock() + atomic.StoreInt32(&t.locsLen, int32(len(locs))) + t.locs = locs + return nil +} + +func (t *traceLocations) match(file string, line int) bool { + if atomic.LoadInt32(&t.locsLen) == 0 { + return false + } + + t.mu.Lock() + defer t.mu.Unlock() + for _, tl := range t.locs { + if tl.match(file, line) { + return true + } + } + return false +} + +// severityFlag is an atomic flag.Value implementation for logsink.Severity. +type severityFlag int32 + +func (s *severityFlag) get() logsink.Severity { + return logsink.Severity(atomic.LoadInt32((*int32)(s))) +} +func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) } +func (s *severityFlag) Get() any { return s.get() } +func (s *severityFlag) Set(value string) error { + threshold, err := logsink.ParseSeverity(value) + if err != nil { + // Not a severity name. Try a raw number. + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = logsink.Severity(v) + if threshold < logsink.Info || threshold > logsink.Fatal { + return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal) + } + } + atomic.StoreInt32((*int32)(s), int32(threshold)) + return nil +} + +var ( + vflags verboseFlags // The -v and -vmodule flags. + + logBacktraceAt traceLocations // The -log_backtrace_at flag. + + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + stderrThreshold severityFlag // The -stderrthreshold flag. +) + +// verboseEnabled returns whether the caller at the given depth should emit +// verbose logs at the given level, with depth 0 identifying the caller of +// verboseEnabled. +func verboseEnabled(callerDepth int, level Level) bool { + return vflags.enabled(callerDepth+1, level) +} + +// backtraceAt returns whether the logging call at the given function and line +// should also emit a backtrace of the current call stack. 
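+// For example, with -log_backtrace_at=gopherflakes.go:234 it reports true only
+// for calls made from that exact file and line.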
+func backtraceAt(file string, line int) bool { + return logBacktraceAt.match(file, line) +} + +func init() { + vflags.moduleLevelCache.Store(&sync.Map{}) + + flag.Var(&vflags.v, "v", "log level for V logs") + flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + + flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + stderrThreshold = severityFlag(logsink.Error) + + flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") +} diff --git a/vendor/github.com/golang/glog/internal/logsink/logsink.go b/vendor/github.com/golang/glog/internal/logsink/logsink.go new file mode 100644 index 0000000000..28c38a6abc --- /dev/null +++ b/vendor/github.com/golang/glog/internal/logsink/logsink.go @@ -0,0 +1,393 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logsink + +import ( + "bytes" + "context" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/glog/internal/stackdump" +) + +// MaxLogMessageLen is the limit on length of a formatted log message, including +// the standard line prefix and trailing newline. +// +// Chosen to match C++ glog. +const MaxLogMessageLen = 15000 + +// A Severity is a severity at which a message can be logged. +type Severity int8 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + Info Severity = iota + Warning + Error + + // Fatal contains logs written immediately before the process terminates. + // + // Sink implementations should not terminate the process themselves: the log + // package will perform any necessary cleanup and terminate the process as + // appropriate. + Fatal +) + +func (s Severity) String() string { + switch s { + case Info: + return "INFO" + case Warning: + return "WARNING" + case Error: + return "ERROR" + case Fatal: + return "FATAL" + } + return fmt.Sprintf("%T(%d)", s, s) +} + +// ParseSeverity returns the case-insensitive Severity value for the given string. +func ParseSeverity(name string) (Severity, error) { + name = strings.ToUpper(name) + for s := Info; s <= Fatal; s++ { + if s.String() == name { + return s, nil + } + } + return -1, fmt.Errorf("logsink: invalid severity %q", name) +} + +// Meta is metadata about a logging call. +type Meta struct { + // The context with which the log call was made (or nil). If set, the context + // is only valid during the logsink.Structured.Printf call, it should not be + // retained. + Context context.Context + + // Time is the time at which the log call was made. + Time time.Time + + // File is the source file from which the log entry originates. 
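+	// (Text sinks typically trim it to the basename when formatting.)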
+	File string
+	// Line is the line offset within the source file.
+	Line int
+	// Depth is the number of stack frames between the logsink and the log call.
+	Depth int
+
+	Severity Severity
+
+	// Verbose indicates whether the call was made via "log.V". Log entries below
+	// the current verbosity threshold are not sent to the sink.
+	Verbose bool
+
+	// Thread ID. This can be populated with a thread ID from another source,
+	// such as a system we are importing logs from. In the normal case, this
+	// will be set to the process ID (PID), since Go doesn't have threads.
+	Thread int64
+
+	// Stack trace starting in the logging function. May be nil.
+	// A logsink should implement the StackWanter interface to request this.
+	//
+	// Even if WantStack returns false, this field may be set (e.g. if another
+	// sink wants a stack trace).
+	Stack *stackdump.Stack
+}
+
+// Structured is a logging destination that accepts structured data as input.
+type Structured interface {
+	// Printf formats according to a fmt.Printf format specifier and writes a log
+	// entry. The precise result of formatting depends on the sink, but should
+	// aim for consistency with fmt.Printf.
+	//
+	// Printf returns the number of bytes occupied by the log entry, which
+	// may not be equal to the total number of bytes written.
+	//
+	// Printf returns any error encountered *if* it is severe enough that the log
+	// package should terminate the process.
+	//
+	// The sink must not modify the *Meta parameter, nor reference it after
+	// Printf has returned: it may be reused in subsequent calls.
+	Printf(meta *Meta, format string, a ...any) (n int, err error)
+}
+
+// StackWanter can be implemented by a logsink.Structured to indicate that it
+// wants a stack trace to accompany at least some of the log messages it receives.
+type StackWanter interface {
+	// WantStack returns true if the sink requires a stack trace for a log message
+	// with this metadata.
+	//
+	// NOTE: Returning true implies that meta.Stack will be non-nil. Returning
+	// false does NOT imply that meta.Stack will be nil.
+	WantStack(meta *Meta) bool
+}
+
+// Text is a logging destination that accepts pre-formatted log lines (instead of
+// structured data).
+type Text interface {
+	// Enabled returns whether this sink should output messages for the given
+	// Meta. If the sink returns false for a given Meta, the Printf function will
+	// not call Emit on it for the corresponding log message.
+	Enabled(*Meta) bool
+
+	// Emit writes a pre-formatted text log entry (including any applicable
+	// header) to the log. It returns the number of bytes occupied by the entry
+	// (which may differ from the length of the passed-in slice).
+	//
+	// Emit returns any error encountered *if* it is severe enough that the log
+	// package should terminate the process.
+	//
+	// The sink must not modify the *Meta parameter, nor reference it after
+	// Emit has returned: it may be reused in subsequent calls.
+	//
+	// NOTE: When developing a text sink, keep in mind the surface in which the
+	// logs will be displayed, and whether it's important that the sink be
+	// resistant to tampering in the style of b/211428300. Standard text sinks
+	// (like `stderrSink`) do not protect against this (e.g. by escaping
+	// characters) because the cases where they would show user-influenced bytes
+	// are vanishingly small.
+	Emit(*Meta, []byte) (n int, err error)
+}
+
+// bufs is a pool of *bytes.Buffer used in formatting log entries.
+var bufs sync.Pool // Pool of *bytes.Buffer. + +// textPrintf formats a text log entry and emits it to all specified Text sinks. +// +// The returned n is the maximum across all Emit calls. +// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). +func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) { + // We expect at most file, stderr, and perhaps syslog. If there are more, + // we'll end up allocating - no big deal. + const maxExpectedTextSinks = 3 + var noAllocSinks [maxExpectedTextSinks]Text + + sinks := noAllocSinks[:0] + for _, s := range textSinks { + if s.Enabled(m) { + sinks = append(sinks, s) + } + } + if len(sinks) == 0 && m.Severity != Fatal { + return 0, nil // No TextSinks specified; don't bother formatting. + } + + bufi := bufs.Get() + var buf *bytes.Buffer + if bufi == nil { + buf = bytes.NewBuffer(nil) + bufi = buf + } else { + buf = bufi.(*bytes.Buffer) + buf.Reset() + } + + // Lmmdd hh:mm:ss.uuuuuu PID/GID file:line] + // + // The "PID" entry arguably ought to be TID for consistency with other + // environments, but TID is not meaningful in a Go program due to the + // multiplexing of goroutines across threads. + // + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + const severityChar = "IWEF" + buf.WriteByte(severityChar[m.Severity]) + + _, month, day := m.Time.Date() + hour, minute, second := m.Time.Clock() + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0') + buf.WriteByte(' ') + + nDigits(buf, 7, uint64(m.Thread), ' ') + buf.WriteByte(' ') + + { + file := m.File + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + buf.WriteString(file) + } + + buf.WriteByte(':') + { + var tmp [19]byte + buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10)) + } + buf.WriteString("] ") + + msgStart := buf.Len() + fmt.Fprintf(buf, format, args...) + if buf.Len() > MaxLogMessageLen-1 { + buf.Truncate(MaxLogMessageLen - 1) + } + msgEnd := buf.Len() + if b := buf.Bytes(); b[len(b)-1] != '\n' { + buf.WriteByte('\n') + } + + for _, s := range sinks { + sn, sErr := s.Emit(m, buf.Bytes()) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + + if m.Severity == Fatal { + savedM := *m + fatalMessageStore(savedEntry{ + meta: &savedM, + msg: buf.Bytes()[msgStart:msgEnd], + }) + } else { + bufs.Put(bufi) + } + return n, err +} + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer to buf. +func twoDigits(buf *bytes.Buffer, d int) { + buf.WriteByte(digits[(d/10)%10]) + buf.WriteByte(digits[d%10]) +} + +// nDigits formats an n-digit integer to buf, padding with pad on the left. It +// assumes d != 0. +func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) { + var tmp [20]byte + + cutoff := len(tmp) - n + j := len(tmp) - 1 + for ; d > 0; j-- { + tmp[j] = digits[d%10] + d /= 10 + } + for ; j >= cutoff; j-- { + tmp[j] = pad + } + j++ + buf.Write(tmp[j:]) +} + +// Printf writes a log entry to all registered TextSinks in this package, then +// to all registered StructuredSinks. +// +// The returned n is the maximum across all Emit and Printf calls. +// The returned err is the first non-nil error encountered. 
+// Sinks that are disabled by configuration should return (0, nil).
+func Printf(m *Meta, format string, args ...any) (n int, err error) {
+	m.Depth++
+	n, err = textPrintf(m, TextSinks, format, args...)
+
+	for _, sink := range StructuredSinks {
+		// TODO: Support TextSinks that implement StackWanter?
+		if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) {
+			if m.Stack == nil {
+				// First, try to find a stacktrace in args, otherwise generate one.
+				for _, arg := range args {
+					if stack, ok := arg.(stackdump.Stack); ok {
+						m.Stack = &stack
+						break
+					}
+				}
+				if m.Stack == nil {
+					stack := stackdump.Caller( /* skipDepth = */ m.Depth)
+					m.Stack = &stack
+				}
+			}
+		}
+		sn, sErr := sink.Printf(m, format, args...)
+		if sn > n {
+			n = sn
+		}
+		if sErr != nil && err == nil {
+			err = sErr
+		}
+	}
+	return n, err
+}
+
+// The sets of sinks to which logs should be written.
+//
+// These must only be modified during package init, and are read-only thereafter.
+var (
+	// StructuredSinks is the set of Structured sink instances to which logs
+	// should be written.
+	StructuredSinks []Structured
+
+	// TextSinks is the set of Text sink instances to which logs should be
+	// written.
+	//
+	// These are registered separately from Structured sink implementations to
+	// avoid the need to repeat the work of formatting a message for each Text
+	// sink that writes it. The package-level Printf function writes to both sets
+	// independently, so a given log destination should only register a Structured
+	// *or* a Text sink (not both).
+	TextSinks []Text
+)
+
+type savedEntry struct {
+	meta *Meta
+	msg  []byte
+}
+
+// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks.
+//
+// The purpose of this sink is to allow applications to intercept logging calls before they are
+// serialized and sent to Text sinks. For example, if one needs to redact PII from logging
+// arguments before they reach STDERR, one solution would be to do the redacting in a Structured
+// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that
+// StructuredTextWrapper instance. This is how one could set this up in their application:
+//
+//	func init() {
+//
+//		wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks}
+//		// sanitizersink will intercept logs and remove PII
+//		sanitizer := sanitizersink{Sink: &wrapper}
+//		logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer)
+//		logsink.TextSinks = nil
+//
+//	}
+type StructuredTextWrapper struct {
+	// TextSinks is the set of Text sinks that should receive logs from this
+	// StructuredTextWrapper instance.
+	TextSinks []Text
+}
+
+// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper.
+func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) {
+	return textPrintf(meta, w.TextSinks, format, args...)
+}
diff --git a/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go b/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
new file mode 100644
index 0000000000..3dc269abc2
--- /dev/null
+++ b/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
@@ -0,0 +1,35 @@
+package logsink
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func fatalMessageStore(e savedEntry) {
+	// Only put a new one in if we haven't assigned before.
+ atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e)) +} + +var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer + +// FatalMessage returns the Meta and message contents of the first message +// logged with Fatal severity, or false if none has occurred. +func FatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(atomic.LoadPointer(&fatalMessage)) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} + +// DoNotUseRacyFatalMessage is FatalMessage, but worse. +// +//go:norace +//go:nosplit +func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(fatalMessage) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} diff --git a/vendor/github.com/golang/glog/internal/stackdump/stackdump.go b/vendor/github.com/golang/glog/internal/stackdump/stackdump.go new file mode 100644 index 0000000000..3427c9d6bd --- /dev/null +++ b/vendor/github.com/golang/glog/internal/stackdump/stackdump.go @@ -0,0 +1,127 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stackdump provides wrappers for runtime.Stack and runtime.Callers +// with uniform support for skipping caller frames. +// +// ⚠ Unlike the functions in the runtime package, these may allocate a +// non-trivial quantity of memory: use them with care. ⚠ +package stackdump + +import ( + "bytes" + "runtime" +) + +// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to +// runtime.Stack itself or 0 if it does not. +// +// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc +// compiler does not. +var runtimeStackSelfFrames = func() int { + for n := 1 << 10; n < 1<<20; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if bytes.Contains(buf[:n], []byte("runtime.Stack")) { + return 1 + } else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 { + return 0 + } + } + return 0 +}() + +// Stack is a stack dump for a single goroutine. +type Stack struct { + // Text is a representation of the stack dump in a human-readable format. + Text []byte + + // PC is a representation of the stack dump using raw program counter values. + PC []uintptr +} + +func (s Stack) String() string { return string(s.Text) } + +// Caller returns the Stack dump for the calling goroutine, starting skipDepth +// frames before the caller of Caller. (Caller(0) provides a dump starting at +// the caller of this function.) +func Caller(skipDepth int) Stack { + return Stack{ + Text: CallerText(skipDepth + 1), + PC: CallerPC(skipDepth + 1), + } +} + +// CallerText returns a textual dump of the stack starting skipDepth frames before +// the caller. (CallerText(0) provides a dump starting at the caller of this +// function.) 
+func CallerText(skipDepth int) []byte { + for n := 1 << 10; ; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if n < len(buf) { + return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n]) + } + } +} + +// CallerPC returns a dump of the program counters of the stack starting +// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at +// the caller of this function.) +func CallerPC(skipDepth int) []uintptr { + for n := 1 << 8; ; n *= 2 { + buf := make([]uintptr, n) + n := runtime.Callers(skipDepth+2, buf) + if n < len(buf) { + return buf[:n] + } + } +} + +// pruneFrames removes the topmost skipDepth frames of the first goroutine in a +// textual stack dump. It overwrites the passed-in slice. +// +// If there are fewer than skipDepth frames in the first goroutine's stack, +// pruneFrames prunes it to an empty stack and leaves the remaining contents +// intact. +func pruneFrames(skipDepth int, stack []byte) []byte { + headerLen := 0 + for i, c := range stack { + if c == '\n' { + headerLen = i + 1 + break + } + } + if headerLen == 0 { + return stack // No header line - not a well-formed stack trace. + } + + skipLen := headerLen + skipNewlines := skipDepth * 2 + for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ { + c := stack[skipLen] + if c != '\n' { + continue + } + skipNewlines-- + skipLen++ + if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' { + break + } + } + + pruned := stack[skipLen-headerLen:] + copy(pruned, stack[:headerLen]) + return pruned +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index fdff3fdb4c..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. 
-// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. 
-type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33de..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. - -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - 
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d2..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd851..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. 
-func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3ef..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. - -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: 
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
deleted file mode 100644
index 8368a3f70d..0000000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-// Range of google.protobuf.Duration as specified in timestamp.proto.
-const (
-	// Seconds field of the earliest valid Timestamp.
-	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	minValidSeconds = -62135596800
-	// Seconds field just after the latest valid Timestamp.
-	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
-	ts, err := TimestampProto(time.Now())
-	if err != nil {
-		panic("ptypes: time.Now() out of Timestamp range")
-	}
-	return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
-	ts := &timestamppb.Timestamp{
-		Seconds: t.Unix(),
-		Nanos:   int32(t.Nanosecond()),
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true. -func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f807600..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. 
- -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d973..0000000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md index 6062a4dacd..eab5dbf7ba 100644 --- a/vendor/github.com/google/btree/README.md +++ b/vendor/github.com/google/btree/README.md @@ -1,7 +1,5 @@ # BTree implementation for Go -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - This package provides an in-memory B-Tree implementation for Go, useful as an ordered, mutable data structure. 
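Context for the btree hunks that follow: the update gates the old interface-based implementation behind a `!go1.18` build tag and adds a generics-based implementation in btree_generic.go. A minimal usage sketch of the new generic API (NewOrderedG, ReplaceOrInsert, Ascend, DeleteMin, all defined in the added file below) — illustrative only, not part of the vendored diff:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// NewOrderedG builds a B-Tree ordered by '<' for any Ordered type
	// (ints, floats, strings, ...). Degree 32 allows up to 63 items per node.
	tr := btree.NewOrderedG[int](32)

	for i := 0; i < 10; i++ {
		// ReplaceOrInsert returns (oldItem, true) when an equal item was replaced.
		tr.ReplaceOrInsert(i)
	}

	// Ascend walks items in order; returning false stops the iteration,
	// so this prints 0 through 5 and then stops.
	tr.Ascend(func(item int) bool {
		fmt.Println(item)
		return item < 5
	})

	if v, ok := tr.DeleteMin(); ok {
		fmt.Println("removed smallest:", v) // prints: removed smallest: 0
	}
}
```

Degree and free-list sharing are the main tuning knobs: per the FreeListG documentation in the added file, multiple trees (in particular those created with Clone) may share one free list, which is why FreeListG guards it with a mutex.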
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index b83acdbc6d..969b910d70 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.18 +// +build !go1.18 + // Package btree implements in-memory B-Trees of arbitrary degree. // // btree implements an in-memory B-Tree for use as an ordered data structure. diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 0000000000..e44a0f4880 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. 
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+//
+// There are two implementations; those suffixed with 'G' are generics, usable
+// for any type, and require a passed-in "less" function to define their ordering.
+// Those without this suffix are specific to the 'Item' interface, and use
+// its 'Less' function for ordering.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	Less(than Item) bool
+}
+
+const (
+	DefaultFreeListSize = 32
+)
+
+// FreeListG represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList, in particular when they're created with Clone.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeListG[T any] struct {
+	mu       sync.Mutex
+	freelist []*node[T]
+}
+
+// NewFreeListG creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeListG[T any](size int) *FreeListG[T] {
+	return &FreeListG[T]{freelist: make([]*node[T], 0, size)}
+}
+
+func (f *FreeListG[T]) newNode() (n *node[T]) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node[T])
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+		out = true
+	}
+	f.mu.Unlock()
+	return
+}
+
+// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIteratorG[T any] func(item T) bool
+
+// Ordered represents the set of types for which the '<' operator works.
+type Ordered interface {
+	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string
+}
+
+// Less[T] returns a default LessFunc that uses the '<' operator for types that support it.
+func Less[T Ordered]() LessFunc[T] {
+	return func(a, b T) bool { return a < b }
+}
+
+// NewOrderedG creates a new B-Tree for ordered types.
+func NewOrderedG[T Ordered](degree int) *BTreeG[T] {
+	return NewG[T](degree, Less[T]())
+}
+
+// NewG creates a new B-Tree with the given degree.
+//
+// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+//
+// The passed-in LessFunc determines how objects of type T are ordered.
+func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] {
+	return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize))
+}
+
+// NewWithFreeListG creates a new B-Tree that uses the given node free list.
+func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTreeG[T]{
+		degree: degree,
+		cow:    &copyOnWriteContext[T]{freelist: f, less: less},
+	}
+}
+
+// items stores items in a node.
+type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. 
Should an equivalent item be
+// found/replaced by insert, it will be returned.
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) {
+	i, found := n.items.find(item, n.cow.less)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out, true
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case n.cow.less(item, inTree):
+			// no change, we want first split node
+		case n.cow.less(inTree, item):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out, true
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node[T]) get(key T) (_ T, _ bool) {
+	i, found := n.items.find(key, n.cow.less)
+	if found {
+		return n.items[i], true
+	} else if len(n.children) > 0 {
+		return n.children[i].get(key)
+	}
+	return
+}
+
+// min returns the first item in the subtree.
+func min[T any](n *node[T]) (_ T, found bool) {
+	if n == nil {
+		return
+	}
+	for len(n.children) > 0 {
+		n = n.children[0]
+	}
+	if len(n.items) == 0 {
+		return
+	}
+	return n.items[0], true
+}
+
+// max returns the last item in the subtree.
+func max[T any](n *node[T]) (_ T, found bool) {
+	if n == nil {
+		return
+	}
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+	}
+	if len(n.items) == 0 {
+		return
+	}
+	return n.items[len(n.items)-1], true
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+	removeItem toRemove = iota // removes the given item
+	removeMin                  // removes smallest item in the subtree
+	removeMax                  // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) {
+	var i int
+	var found bool
+	switch typ {
+	case removeMax:
+		if len(n.children) == 0 {
+			return n.items.pop(), true
+		}
+		i = len(n.items)
+	case removeMin:
+		if len(n.children) == 0 {
+			return n.items.removeAt(0), true
+		}
+		i = 0
+	case removeItem:
+		i, found = n.items.find(item, n.cow.less)
+		if len(n.children) == 0 {
+			if found {
+				return n.items.removeAt(i), true
+			}
+			return
+		}
+	default:
+		panic("invalid type")
+	}
+	// If we get to here, we have children.
+	if len(n.children[i].items) <= minItems {
+		return n.growChildAndRemove(i, item, minItems, typ)
+	}
+	child := n.mutableChild(i)
+	// Either we had enough items to begin with, or we've done some
+	// merging/stealing, because we've got enough now and we're ready to return
+	// stuff.
+	if found {
+		// The item exists at index 'i', and the child we've selected can give us a
+		// predecessor, since if we've gotten here it's got > minItems items in it.
+		out := n.items[i]
+		// We use our special-case 'remove' call with typ=removeMax to pull the
+		// predecessor of item i (the rightmost leaf of our immediate left child)
+		// and set it into where we pulled the item from.
+		var zero T
+		n.items[i], _ = child.remove(zero, minItems, removeMax)
+		return out, true
+	}
+	// Final recursive call. Once we're here, we know that the item isn't in this
+	// node and that the child is big enough to remove from.
+	return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+//  1) item is in this node
+//  2) item is in child
+// In both cases, we need to handle the two subcases:
+//  A) node has enough values that it can spare one
+//  B) node doesn't have enough values
+// For the latter, we have to check:
+//  a) left sibling has node to spare
+//  b) right sibling has node to spare
+//  c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+type optionalItem[T any] struct {
+	item  T
+	valid bool
+}
+
+func optional[T any](item T) optionalItem[T] {
+	return optionalItem[T]{item: item, valid: true}
+}
+func empty[T any]() optionalItem[T] {
+	return optionalItem[T]{}
+}
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating "greaterOrEqual" or "lessThanEqual" rather than just
+// "greaterThan" or "lessThan" queries.
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
+// Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (the old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTreeG[T]) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTreeG[T]) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+type freeType int
+
+const (
+	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+	ftStored                       // node was stored in the freelist for later use
+	ftNotOwned                     // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context. It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned,
+// and the second return value is true. Otherwise, (zeroValue, false) is
+// returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out, outb := t.root.insert(item, t.maxItems())
+	if !outb {
+		t.length++
+	}
+	return out, outb
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) Delete(item T) (T, bool) {
+	return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMin() (T, bool) {
+	var zero T
+	return t.deleteItem(zero, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
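+//
+// For example, draining a tree from its largest element down (a sketch with
+// tr built as in the earlier examples; the loop stops once the tree is empty
+// and ok reports false):
+//
+//	for v, ok := tr.DeleteMax(); ok; v, ok = tr.DeleteMax() {
+//		fmt.Println(v)
+//	}
+//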
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
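+//
+// For example (tr as in the sketches above):
+//
+//	if v, ok := tr.Get(7); ok {
+//		fmt.Println("found", v)
+//	}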
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) {
+	if t.root == nil {
+		return
+	}
+	return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Min() (_ T, _ bool) {
+	return min(t.root)
+}
+
+// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Max() (_ T, _ bool) {
+	return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTreeG[T]) Has(key T) bool {
+	_, ok := t.Get(key)
+	return ok
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTreeG[T]) Len() int {
+	return t.length
+}
+
+// Clear removes all items from the btree. If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full. Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly. It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//	O(1): when addNodesToFreelist is false, this is a single operation.
+//	O(1): when the freelist is already full, it breaks out immediately
+//	O(freelist size): when the freelist is empty and the nodes are all owned
+//	   by this tree, nodes are added to the freelist until full.
+//	O(tree size): when all nodes are owned by another tree, all nodes are
+//	   iterated over looking for nodes to add to the freelist, and due to
+//	   ownership, none are.
+func (t *BTreeG[T]) Clear(addNodesToFreelist bool) {
+	if t.root != nil && addNodesToFreelist {
+		t.root.reset(t.cow)
+	}
+	t.root, t.length = nil, 0
+}
+
+// reset returns a subtree to the freelist. It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up. Returns true if parent reset call should continue.
+func (n *node[T]) reset(c *copyOnWriteContext[T]) bool {
+	for _, child := range n.children {
+		if !child.reset(c) {
+			return false
+		}
+	}
+	return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+	return a < b.(Int)
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree BTreeG[Item]
+
+var itemLess LessFunc[Item] = func(a, b Item) bool {
+	return a.Less(b)
+}
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+	return (*BTree)(NewG[Item](degree, itemLess))
+}
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeList FreeListG[Item]
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
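+//
+// For example, two trees sharing one freelist (a sketch; NewWithFreeList is
+// defined just below):
+//
+//	fl := NewFreeList(32)
+//	t1 := NewWithFreeList(8, fl)
+//	t2 := NewWithFreeList(8, fl)
+//	t1.Clear(true) // recycled nodes become available to t2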
+func NewFreeList(size int) *FreeList {
+	return (*FreeList)(NewFreeListG[Item](size))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f)))
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator ItemIteratorG[Item]
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	return (*BTree)((*BTreeG[Item])(t).Clone())
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).Delete(item)
+	return i
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMax()
+	return i
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMin()
+	return i
+}
+
+// Get looks for the key item in the tree, returning it. It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	i, _ := (*BTreeG[Item])(t).Get(key)
+	return i
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	i, _ := (*BTreeG[Item])(t).Max()
+	return i
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	i, _ := (*BTreeG[Item])(t).Min()
+	return i
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return (*BTreeG[Item])(t).Has(key)
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item)
+	return i
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
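+//
+// For example, with the Int item type defined above:
+//
+//	tr := New(2)
+//	for i := 0; i < 10; i++ {
+//		tr.ReplaceOrInsert(Int(i))
+//	}
+//	tr.AscendLessThan(Int(5), func(i Item) bool {
+//		fmt.Println(i) // visits Int(0) through Int(4)
+//		return true
+//	})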
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. 
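+//
+// For example, resetting a tree between batches while keeping its nodes warm
+// (a sketch; nextBatch stands for any []Item):
+//
+//	tr.Clear(true)
+//	for _, v := range nextBatch {
+//		tr.ReplaceOrInsert(v)
+//	}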
+func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go index 250c81e8c8..16ae66faa3 100644 --- a/vendor/github.com/google/gnostic-models/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -20,8 +20,8 @@ import ( "os/exec" "strings" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" yaml "gopkg.in/yaml.v3" extensions "github.com/google/gnostic-models/extensions" @@ -33,7 +33,7 @@ type ExtensionHandler struct { } // CallExtension calls a binary extension handler. -func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { +func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) { if context == nil || context.ExtensionHandlers == nil { return false, nil, nil } @@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl return handled, response, err } -func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) { if extensionHandlers.Name != "" { yamlData, _ := yaml.Marshal(in) request := &extensions.ExtensionHandlerRequest{ diff --git a/vendor/github.com/google/gnostic-models/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go index ec8afd0092..0768163e5a 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extensions.go +++ b/vendor/github.com/google/gnostic-models/extensions/extensions.go @@ -19,8 +19,8 @@ import ( "log" "os" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" ) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) @@ -54,7 +54,7 @@ func Main(handler extensionHandler) { response.Errors = append(response.Errors, err.Error()) } else if handled { response.Handled = true - response.Value, err = ptypes.MarshalAny(output) + response.Value, err = anypb.New(output) if err != nil { response.Errors = append(response.Errors, err.Error()) } diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go new file mode 100644 index 0000000000..5c2b63e262 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go @@ -0,0 +1,182 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.19.3 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. 
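+//
+// For example (a sketch; opts is assumed to be a *descriptorpb.FieldOptions
+// taken from a compiled descriptor, with google.golang.org/protobuf/proto
+// imported):
+//
+//	if proto.HasExtension(opts, E_Property) {
+//		schema := proto.GetExtension(opts, E_Property).(*Schema)
+//		_ = schema
+//	}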
+var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + 
(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto new file mode 100644 index 0000000000..09ee0aac51 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package openapi.v3; + +import "google/protobuf/descriptor.proto"; +import "openapiv3/OpenAPIv3.proto"; + +// The Go package name. +option go_package = "./openapiv3;openapi_v3"; +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; +// The Java outer classname should be the filename in UpperCamelCase. 
This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "AnnotationsProto"; +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +extend google.protobuf.FileOptions { + Document document = 1143; +} + +extend google.protobuf.MethodOptions { + Operation operation = 1143; +} + +extend google.protobuf.MessageOptions { + Schema schema = 1143; +} + +extend google.protobuf.FieldOptions { + Schema property = 1143; +} diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig deleted file mode 100644 index 2940ec92ac..0000000000 --- a/vendor/github.com/gorilla/websocket/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index 84039fec68..cd3fcd1ef7 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1 +1,25 @@ -coverage.coverprofile +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml deleted file mode 100644 index 34882139e1..0000000000 --- a/vendor/github.com/gorilla/websocket/.golangci.yml +++ /dev/null @@ -1,3 +0,0 @@ -run: - skip-dirs: - - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 0000000000..1931f40068 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index bb9d80bc9b..9171c97225 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,27 +1,22 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile deleted file mode 100644 index 603a63f50a..0000000000 --- a/vendor/github.com/gorilla/websocket/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: golangci-lint -golangci-lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint" - golangci-lint run -v - -.PHONY: gosec -gosec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec" - gosec -exclude-dir examples ./... 
- -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck" - govulncheck ./... - -.PHONY: verify -verify: golangci-lint gosec govulncheck - -.PHONY: test -test: - @echo "##### Running tests" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 1fd5e9c4e7..d33ed7fdd8 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -1,13 +1,10 @@ -# gorilla/websocket +# Gorilla WebSocket -![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) -[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) -[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. ### Documentation @@ -17,7 +14,6 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) -* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) ### Status @@ -34,3 +30,4 @@ package API is stable. The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
+ diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 815b0ca5c8..04fdafee18 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -11,16 +11,13 @@ import ( "errors" "fmt" "io" - "log" - + "io/ioutil" "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" - - "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -228,7 +225,6 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || - //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -294,9 +290,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } err = c.SetDeadline(deadline) if err != nil { - if err := c.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + c.Close() return nil, err } return c, nil @@ -310,7 +304,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return nil, nil, err } if proxyURL != nil { - dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) if err != nil { return nil, nil, err } @@ -336,9 +330,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h defer func() { if netConn != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() } }() @@ -408,7 +400,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // debugging. buf := make([]byte, 1024) n, _ := io.ReadFull(resp.Body, buf) - resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) return nil, resp, ErrBadHandshake } @@ -426,19 +418,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h break } - resp.Body = io.NopCloser(bytes.NewReader([]byte{})) + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - if err := netConn.SetDeadline(time.Time{}); err != nil { - return nil, nil, err - } + netConn.SetDeadline(time.Time{}) netConn = nil // to avoid close in defer. 
return conn, resp, nil } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{MinVersion: tls.VersionTLS12} + return &tls.Config{} } return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 9fed0ef521..813ffb1e84 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,7 +8,6 @@ import ( "compress/flate" "errors" "io" - "log" "strings" "sync" ) @@ -34,9 +33,7 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { - panic(err) - } + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) return &flateReadWrapper{fr} } @@ -135,9 +132,7 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. - if err := r.Close(); err != nil { - log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) - } + r.Close() } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 221e6cf798..5161ef81f6 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -6,11 +6,11 @@ package websocket import ( "bufio" - "crypto/rand" "encoding/binary" "errors" "io" - "log" + "io/ioutil" + "math/rand" "net" "strconv" "strings" @@ -181,20 +181,13 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) -// maskRand is an io.Reader for generating mask bytes. The reader is initialized -// to crypto/rand Reader. Tests swap the reader to a math/rand reader for -// reproducible results. -var maskRand = rand.Reader - -// newMaskKey returns a new 32 bit value for masking client frames. 
func newMaskKey() [4]byte { - var k [4]byte - _, _ = io.ReadFull(maskRand, k[:]) - return k + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} } func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok { + if e, ok := err.(net.Error); ok && e.Temporary() { err = &netError{msg: e.Error(), timeout: e.Timeout()} } return err @@ -379,9 +372,7 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - if _, err := c.br.Discard(len(p)); err != nil { - return p, err - } + c.br.Discard(len(p)) return p, err } @@ -396,9 +387,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return err } - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } + c.conn.SetWriteDeadline(deadline) if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { @@ -408,7 +397,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return c.writeFatal(err) } if frameType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) + c.writeFatal(ErrCloseSent) } return nil } @@ -449,7 +438,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er d := 1000 * time.Hour if !deadline.IsZero() { - d = time.Until(deadline) + d = deadline.Sub(time.Now()) if d < 0 { return errWriteTimeout } @@ -471,15 +460,13 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } + c.conn.SetWriteDeadline(deadline) _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) + c.writeFatal(ErrCloseSent) } return err } @@ -490,9 +477,7 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - if err := c.writer.Close(); err != nil { - log.Printf("websocket: discarding writer close error: %v", err) - } + c.writer.Close() c.writer = nil } @@ -645,7 +630,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { } if final { - _ = w.endMessage(errWriteClosed) + w.endMessage(errWriteClosed) return nil } @@ -810,7 +795,7 @@ func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. 
if c.readRemaining > 0 { - if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } @@ -832,9 +817,7 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { - return noFrame, err - } + c.setReadRemaining(int64(p[1] & 0x7f)) c.readDecompress = false if rsv1 { @@ -939,9 +922,7 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { - return noFrame, err - } + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) return noFrame, ErrReadLimit } @@ -953,9 +934,7 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - if err := c.setReadRemaining(0); err != nil { - return noFrame, err - } + c.setReadRemaining(0) if err != nil { return noFrame, err } @@ -1002,9 +981,7 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { - return err - } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) return errors.New("websocket: " + message) } @@ -1021,9 +998,7 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. 
if c.reader != nil { - if err := c.reader.Close(); err != nil { - log.Printf("websocket: discarding reader close error: %v", err) - } + c.reader.Close() c.reader = nil } @@ -1079,9 +1054,7 @@ func (r *messageReader) Read(b []byte) (int, error) { } rem := c.readRemaining rem -= int64(n) - if err := c.setReadRemaining(rem); err != nil { - return 0, err - } + c.setReadRemaining(rem) if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1121,7 +1094,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { if err != nil { return messageType, nil, err } - p, err = io.ReadAll(r) + p, err = ioutil.ReadAll(r) return messageType, p, err } @@ -1163,9 +1136,7 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { - return err - } + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) return nil } } @@ -1190,7 +1161,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil - } else if _, ok := err.(net.Error); ok { + } else if e, ok := err.(net.Error); ok && e.Temporary() { return nil } return err diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index 67d0968be8..d0742bf2a5 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,7 +9,6 @@ package websocket import "unsafe" -// #nosec G103 -- (CWE-242) Has been audited const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -23,7 +22,6 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. - //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -38,13 +36,11 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } - //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. 
n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { - //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index 80f55d1eac..e0f466b72f 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -8,13 +8,10 @@ import ( "bufio" "encoding/base64" "errors" - "log" "net" "net/http" "net/url" "strings" - - "golang.org/x/net/proxy" ) type netDialerFunc func(network, addr string) (net.Conn, error) @@ -24,7 +21,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { } func init() { - proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } @@ -58,9 +55,7 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } @@ -69,16 +64,12 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } if resp.StatusCode != 200 { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 1e720e1da4..bb33597432 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -8,7 +8,6 @@ import ( "bufio" "errors" "io" - "log" "net/http" "net/url" "strings" @@ -184,9 +183,7 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if brw.Reader.Buffered() > 0 { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() return nil, errors.New("websocket: client sent data before handshake is complete") } @@ -251,34 +248,17 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, "\r\n"...) // Clear deadlines set by HTTP server. 
- if err := netConn.SetDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetDeadline(time.Time{}) if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) } if _, err = netConn.Write(p); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() return nil, err } if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetWriteDeadline(time.Time{}) } return c, nil @@ -376,12 +356,8 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { // bufio.Writer's underlying writer. var wh writeHook bw.Reset(&wh) - if err := bw.WriteByte(0); err != nil { - panic(err) - } - if err := bw.Flush(); err != nil { - log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) - } + bw.WriteByte(0) + bw.Flush() bw.Reset(originalWriter) diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go index 7f38645348..a62b68ccb1 100644 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -1,3 +1,6 @@ +//go:build go1.17 +// +build go1.17 + package websocket import ( diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 0000000000..e1b2b44f6e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 9b1a629bff..31a5dee646 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + "crypto/sha1" "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + h := sha1.New() h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 0000000000..2e668f6b88 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. 
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. 
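+//
+// For example (a sketch; upstream stands for any proxy_Dialer, such as one
+// returned by proxy_SOCKS5 below):
+//
+//	perHost := proxy_NewPerHost(upstream, proxy_Direct)
+//	if _, cidr, err := net.ParseCIDR("10.0.0.0/8"); err == nil {
+//		perHost.AddNetwork(cidr) // dial 10.0.0.0/8 directly, proxy the rest
+//	}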
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). 
+// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
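The connect method that follows implements the client side of the RFC 1928 handshake. As a rough standalone sketch of the request bytes it writes for an unauthenticated CONNECT to example.com:443 (illustrative only; the real method also negotiates username/password auth and validates the proxy's replies):

    package main

    import "fmt"

    func main() {
    	host, port := "example.com", 443
    	// Greeting: version 5, one supported method, "no authentication" (0).
    	greeting := []byte{5, 1, 0}
    	// Request: version 5, CONNECT (1), reserved (0), address type domain (3),
    	// then a length-prefixed host name and a big-endian port.
    	req := []byte{5, 1, 0, 3, byte(len(host))}
    	req = append(req, host...)
    	req = append(req, byte(port>>8), byte(port))
    	fmt.Printf("greeting: % x\nrequest:  % x\n", greeting, req)
    }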
+func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/kubeflow/spark-operator/LICENSE b/vendor/github.com/kubeflow/spark-operator/LICENSE new file mode 100644 index 0000000000..f49a4e16e6 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/defaults.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/defaults.go new file mode 100644 index 0000000000..a6e66d3217 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/defaults.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import "strconv" + +// SetSparkApplicationDefaults sets default values for certain fields of a SparkApplication. +func SetSparkApplicationDefaults(app *SparkApplication) { + if app == nil { + return + } + + if app.Spec.Type == "" { + app.Spec.Type = SparkApplicationTypeScala + } + + if app.Spec.Mode == "" { + app.Spec.Mode = DeployModeCluster + } + + if app.Spec.RestartPolicy.Type == "" { + app.Spec.RestartPolicy.Type = RestartPolicyNever + } + + if app.Spec.RestartPolicy.Type != RestartPolicyNever { + // Default to 5 sec if the RestartPolicy is OnFailure or Always and these values aren't specified. + if app.Spec.RestartPolicy.OnFailureRetryInterval == nil { + app.Spec.RestartPolicy.OnFailureRetryInterval = new(int64) + *app.Spec.RestartPolicy.OnFailureRetryInterval = 5 + } + + if app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval == nil { + app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = new(int64) + *app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = 5 + } + } + + setDriverSpecDefaults(&app.Spec.Driver, app.Spec.SparkConf) + setExecutorSpecDefaults(&app.Spec.Executor, app.Spec.SparkConf, app.Spec.DynamicAllocation) +} + +func setDriverSpecDefaults(spec *DriverSpec, sparkConf map[string]string) { + if _, exists := sparkConf["spark.driver.cores"]; !exists && spec.Cores == nil { + spec.Cores = new(int32) + *spec.Cores = 1 + } + if _, exists := sparkConf["spark.driver.memory"]; !exists && spec.Memory == nil { + spec.Memory = new(string) + *spec.Memory = "1g" + } +} + +func setExecutorSpecDefaults(spec *ExecutorSpec, sparkConf map[string]string, allocSpec *DynamicAllocation) { + if _, exists := sparkConf["spark.executor.cores"]; !exists && spec.Cores == nil { + spec.Cores = new(int32) + *spec.Cores = 1 + } + if _, exists := sparkConf["spark.executor.memory"]; !exists && spec.Memory == nil { + spec.Memory = new(string) + *spec.Memory = "1g" + } + var dynalloc, _ = sparkConf["spark.dynamicallocation.enabled"] + if dynamic, _ := strconv.ParseBool(dynalloc); !dynamic && (allocSpec == nil || !allocSpec.Enabled) { + if _, exists := sparkConf["spark.executor.instances"]; !exists && spec.Instances == nil { + spec.Instances = new(int32) + *spec.Instances = 1 + } + } +} diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/doc.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/doc.go new file mode 100644 index 0000000000..660420bd25 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register + +// Package v1beta2 is the v1beta2 version of the API. 
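As a sanity check on the defaulting logic above, a small sketch of what SetSparkApplicationDefaults fills in for an otherwise empty spec; the expected values in the comments are read off the code above, not captured output:

    package main

    import (
    	"fmt"

    	"github.com/kubeflow/spark-operator/api/v1beta2"
    )

    func main() {
    	app := &v1beta2.SparkApplication{}
    	v1beta2.SetSparkApplicationDefaults(app)
    	fmt.Println(app.Spec.Type)                // Scala
    	fmt.Println(app.Spec.Mode)                // cluster
    	fmt.Println(app.Spec.RestartPolicy.Type)  // Never
    	fmt.Println(*app.Spec.Driver.Cores)       // 1
    	fmt.Println(*app.Spec.Driver.Memory)      // 1g
    	fmt.Println(*app.Spec.Executor.Instances) // 1 (dynamic allocation off)
    }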
+// +groupName=sparkoperator.k8s.io +// +versionName=v1beta2 +package v1beta2 diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/groupversion_info.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/groupversion_info.go new file mode 100644 index 0000000000..0f8277c704 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=sparkoperator.k8s.io +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/pod_webhook.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/pod_webhook.go new file mode 100644 index 0000000000..5cdbbd0e30 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/pod_webhook.go @@ -0,0 +1,17 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/register.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/register.go new file mode 100644 index 0000000000..f6eea9ab85 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/register.go @@ -0,0 +1,34 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + Group = "sparkoperator.k8s.io" + Version = "v1beta2" +) + +// SchemeGroupVersion is the group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} + +// Resource takes an unqualified resource and returns a Group-qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/scheduledsparkapplication_types.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/scheduledsparkapplication_types.go new file mode 100644 index 0000000000..8530d10ee2 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/scheduledsparkapplication_types.go @@ -0,0 +1,125 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +func init() { + SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{}) +} + +// ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication. +type ScheduledSparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // Schedule is a cron schedule on which the application should run. + Schedule string `json:"schedule"` + // Template is a template from which SparkApplication instances can be created. + Template SparkApplicationSpec `json:"template"` + // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + // +optional + // Defaults to false. + Suspend *bool `json:"suspend,omitempty"` + // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + // +optional + // Defaults to 1. + SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` + // FailedRunHistoryLimit is the number of past failed runs of the application to keep. + // +optional + // Defaults to 1. + FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` +} + +// ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication. +type ScheduledSparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // LastRun is the time when the last run of the application started. 
+ // +nullable + LastRun metav1.Time `json:"lastRun,omitempty"` + // NextRun is the time when the next run of the application will start. + // +nullable + NextRun metav1.Time `json:"nextRun,omitempty"` + // LastRunName is the name of the SparkApplication for the most recent run of the application. + LastRunName string `json:"lastRunName,omitempty"` + // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. + PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` + // PastFailedRunNames keeps the names of SparkApplications for past failed runs. + PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` + // ScheduleState is the current scheduling state of the application. + ScheduleState ScheduleState `json:"scheduleState,omitempty"` + // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. + Reason string `json:"reason,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298" +// +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=.spec.schedule,name=Schedule,type=string +// +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=string +// +kubebuilder:printcolumn:JSONPath=.status.lastRun,name=Last Run,type=date +// +kubebuilder:printcolumn:JSONPath=.status.lastRunName,name=Last Run Name,type=string +// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date + +// ScheduledSparkApplication is the Schema for the scheduledsparkapplications API. +type ScheduledSparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ScheduledSparkApplicationSpec `json:"spec"` + Status ScheduledSparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduledSparkApplicationList contains a list of ScheduledSparkApplication. +type ScheduledSparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledSparkApplication `json:"items"` +} + +type ConcurrencyPolicy string + +const ( + // ConcurrencyAllow allows SparkApplications to run concurrently. + ConcurrencyAllow ConcurrencyPolicy = "Allow" + // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous + // one hasn't finished yet. + ConcurrencyForbid ConcurrencyPolicy = "Forbid" + // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. + ConcurrencyReplace ConcurrencyPolicy = "Replace" +) + +type ScheduleState string + +const ( + ScheduleStateNew ScheduleState = "" + ScheduleStateValidating ScheduleState = "Validating" + ScheduleStateScheduled ScheduleState = "Scheduled" + ScheduleStateFailedValidation ScheduleState = "FailedValidation" +) diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/sparkapplication_types.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/sparkapplication_types.go new file mode 100644 index 0000000000..dcf0237b4f --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/sparkapplication_types.go @@ -0,0 +1,718 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
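To make the scheduling fields above concrete, a hedged sketch of a ScheduledSparkApplication that runs its template nightly and skips a run if the previous one is still going; all values are illustrative:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	"github.com/kubeflow/spark-operator/api/v1beta2"
    )

    func main() {
    	keep := int32(3)
    	ssa := &v1beta2.ScheduledSparkApplication{
    		ObjectMeta: metav1.ObjectMeta{Name: "nightly-report"},
    		Spec: v1beta2.ScheduledSparkApplicationSpec{
    			Schedule:                  "0 2 * * *", // cron: 02:00 every day
    			Template:                  v1beta2.SparkApplicationSpec{ /* driver/executor elided */ },
    			ConcurrencyPolicy:         v1beta2.ConcurrencyForbid,
    			SuccessfulRunHistoryLimit: &keep,
    			FailedRunHistoryLimit:     &keep,
    		},
    	}
    	fmt.Println(ssa.Name, ssa.Spec.Schedule)
    }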
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +func init() { + SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{}) +} + +// SparkApplicationSpec defines the desired state of SparkApplication +// It carries every pieces of information a spark-submit command takes and recognizes. +type SparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // Suspend specifies whether the Spark operator should create Pods or not. + // If a SparkApplication is created with suspend set to true, no Pods are created + // by the Spark operator. If a SparkApplication is suspended after creation + // (i.e. the flag goes from false to true), the Spark operator will delete + // all active Pods associated with this SparkApplication. + // Users must design their Spark application to gracefully handle this. + Suspend bool `json:"suspend"` + // Type tells the type of the Spark application. + // +kubebuilder:validation:Enum={Java,Python,Scala,R} + Type SparkApplicationType `json:"type"` + // SparkVersion is the version of Spark the application uses. + SparkVersion string `json:"sparkVersion"` + // Mode is the deployment mode of the Spark application. + // +kubebuilder:validation:Enum={cluster,client} + Mode DeployMode `json:"mode,omitempty"` + // ProxyUser specifies the user to impersonate when submitting the application. + // It maps to the command-line flag "--proxy-user" in spark-submit. + // +optional + ProxyUser *string `json:"proxyUser,omitempty"` + // Image is the container image for the driver, executor, and init-container. Any custom container images for the + // driver, executor, or init-container takes precedence over this. + // +optional + Image *string `json:"image,omitempty"` + // ImagePullPolicy is the image pull policy for the driver, executor, and init-container. + // +optional + ImagePullPolicy *string `json:"imagePullPolicy,omitempty"` + // ImagePullSecrets is the list of image-pull secrets. + // +optional + ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` + // MainClass is the fully-qualified main class of the Spark application. + // This only applies to Java/Scala Spark applications. + // +optional + MainClass *string `json:"mainClass,omitempty"` + // MainFile is the path to a bundled JAR, Python, or R file of the application. + MainApplicationFile *string `json:"mainApplicationFile"` + // Arguments is a list of arguments to be passed to the application. + // +optional + Arguments []string `json:"arguments,omitempty"` + // SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in + // spark-submit. 
+ // +optional + SparkConf map[string]string `json:"sparkConf,omitempty"` + // HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option + // in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop + // configuration properties. + // +optional + HadoopConf map[string]string `json:"hadoopConf,omitempty"` + // SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. + // The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to. + // +optional + SparkConfigMap *string `json:"sparkConfigMap,omitempty"` + // HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. + // The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to. + // +optional + HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"` + // Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors. + // +optional + Volumes []corev1.Volume `json:"volumes,omitempty"` + // Driver is the driver specification. + Driver DriverSpec `json:"driver"` + // Executor is the executor specification. + Executor ExecutorSpec `json:"executor"` + // Deps captures all possible types of dependencies of a Spark application. + // +optional + Deps Dependencies `json:"deps,omitempty"` + // RestartPolicy defines the policy on if and in which conditions the controller should restart an application. + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` + // NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + // This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). + // This field will be deprecated in future versions (at SparkApplicationSpec level). + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // FailureRetries is the number of times to retry a failed application before giving up. + // This is best effort and actual retry attempts can be >= the value specified. + // +optional + FailureRetries *int32 `json:"failureRetries,omitempty"` + // RetryInterval is the unit of intervals in seconds between submission retries. + // +optional + RetryInterval *int64 `json:"retryInterval,omitempty"` + // This sets the major Python version of the docker + // image used to run the driver and executor containers. Can either be 2 or 3, default 2. + // +optional + // +kubebuilder:validation:Enum={"2","3"} + PythonVersion *string `json:"pythonVersion,omitempty"` + // This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + // For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + // be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + // +optional + MemoryOverheadFactor *string `json:"memoryOverheadFactor,omitempty"` + // Monitoring configures how monitoring is handled. + // +optional + Monitoring *MonitoringSpec `json:"monitoring,omitempty"` + // BatchScheduler configures which batch scheduler will be used for scheduling + // +optional + BatchScheduler *string `json:"batchScheduler,omitempty"` + // TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication + // after its termination. 
+ // The SparkApplication object will be garbage collected if the current time is more than the + // TimeToLiveSeconds since its termination. + // +optional + TimeToLiveSeconds *int64 `json:"timeToLiveSeconds,omitempty"` + // BatchSchedulerOptions provides fine-grained control on how to batch scheduling. + // +optional + BatchSchedulerOptions *BatchSchedulerConfiguration `json:"batchSchedulerOptions,omitempty"` + // SparkUIOptions allows configuring the Service and the Ingress to expose the sparkUI + // +optional + SparkUIOptions *SparkUIConfiguration `json:"sparkUIOptions,omitempty"` + // DriverIngressOptions allows configuring the Service and the Ingress to expose ports inside Spark Driver + // +optional + DriverIngressOptions []DriverIngressConfiguration `json:"driverIngressOptions,omitempty"` + // DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes + // scheduler backend since Spark 3.0. + // +optional + DynamicAllocation *DynamicAllocation `json:"dynamicAllocation,omitempty"` +} + +// SparkApplicationStatus defines the observed state of SparkApplication +type SparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // SparkApplicationID is set by the spark-distribution(via spark.app.id config) on the driver and executor pods + SparkApplicationID string `json:"sparkApplicationId,omitempty"` + // SubmissionID is a unique ID of the current submission of the application. + SubmissionID string `json:"submissionID,omitempty"` + // LastSubmissionAttemptTime is the time for the last application submission attempt. + // +nullable + LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` + // CompletionTime is the time when the application runs to completion if it does. + // +nullable + TerminationTime metav1.Time `json:"terminationTime,omitempty"` + // DriverInfo has information about the driver. + DriverInfo DriverInfo `json:"driverInfo"` + // AppState tells the overall application state. + AppState ApplicationState `json:"applicationState,omitempty"` + // ExecutorState records the state of executors by executor Pod names. + ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` + // ExecutionAttempts is the total number of attempts to run a submitted application to completion. + // Incremented upon each attempted run of the application and reset upon invalidation. + ExecutionAttempts int32 `json:"executionAttempts,omitempty"` + // SubmissionAttempts is the total number of attempts to submit an application to run. + // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. 
+ SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298" +// +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=boolean +// +kubebuilder:printcolumn:JSONPath=.status.applicationState.state,name=Status,type=string +// +kubebuilder:printcolumn:JSONPath=.status.executionAttempts,name=Attempts,type=string +// +kubebuilder:printcolumn:JSONPath=.status.lastSubmissionAttemptTime,name=Start,type=string +// +kubebuilder:printcolumn:JSONPath=.status.terminationTime,name=Finish,type=string +// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date + +// SparkApplication is the Schema for the sparkapplications API +type SparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec SparkApplicationSpec `json:"spec"` + Status SparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SparkApplicationList contains a list of SparkApplication +type SparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SparkApplication `json:"items"` +} + +// SparkApplicationType describes the type of a Spark application. +type SparkApplicationType string + +// Different types of Spark applications. +const ( + SparkApplicationTypeJava SparkApplicationType = "Java" + SparkApplicationTypeScala SparkApplicationType = "Scala" + SparkApplicationTypePython SparkApplicationType = "Python" + SparkApplicationTypeR SparkApplicationType = "R" +) + +// DeployMode describes the type of deployment of a Spark application. +type DeployMode string + +// Different types of deployments. +const ( + DeployModeCluster DeployMode = "cluster" + DeployModeClient DeployMode = "client" + DeployModeInClusterClient DeployMode = "in-cluster-client" +) + +// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application. +// This completely defines actions to be taken on any kind of Failures during an application run. +type RestartPolicy struct { + // Type specifies the RestartPolicyType. + // +kubebuilder:validation:Enum={Never,Always,OnFailure} + Type RestartPolicyType `json:"type,omitempty"` + + // OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. + // This is best effort and actual retry attempts can be >= the value specified due to caching. + // These are required if RestartPolicy is OnFailure. + // +kubebuilder:validation:Minimum=0 + // +optional + OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` + + // OnFailureRetries the number of times to retry running an application before giving up. + // +kubebuilder:validation:Minimum=0 + // +optional + OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` + + // OnSubmissionFailureRetryInterval is the interval in seconds between retries on failed submissions. + // +kubebuilder:validation:Minimum=1 + // +optional + OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` + + // OnFailureRetryInterval is the interval in seconds between retries on failed runs. 
+ // +kubebuilder:validation:Minimum=1 + // +optional + OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` +} + +type RestartPolicyType string + +const ( + RestartPolicyNever RestartPolicyType = "Never" + RestartPolicyOnFailure RestartPolicyType = "OnFailure" + RestartPolicyAlways RestartPolicyType = "Always" +) + +// BatchSchedulerConfiguration used to configure how to batch scheduling Spark Application +type BatchSchedulerConfiguration struct { + // Queue stands for the resource queue which the application belongs to, it's being used in Volcano batch scheduler. + // +optional + Queue *string `json:"queue,omitempty"` + // PriorityClassName stands for the name of k8s PriorityClass resource, it's being used in Volcano batch scheduler. + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + // Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. + // If specified, volcano scheduler will consider it as the resources requested. + // +optional + Resources corev1.ResourceList `json:"resources,omitempty"` +} + +// SparkUIConfiguration is for driver UI specific configuration parameters. +type SparkUIConfiguration struct { + // ServicePort allows configuring the port at service level that might be different from the targetPort. + // TargetPort should be the same as the one defined in spark.ui.port + // +optional + ServicePort *int32 `json:"servicePort,omitempty"` + // ServicePortName allows configuring the name of the service port. + // This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. + // Defaults to spark-driver-ui-port. + // +optional + ServicePortName *string `json:"servicePortName,omitempty"` + // ServiceType allows configuring the type of the service. Defaults to ClusterIP. + // +optional + ServiceType *corev1.ServiceType `json:"serviceType,omitempty"` + // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object. + // +optional + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + // ServiceLabels is a map of key,value pairs of labels that might be added to the service object. + // +optional + ServiceLabels map[string]string `json:"serviceLabels,omitempty"` + // IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object. i.e. specify nginx as ingress.class + // +optional + IngressAnnotations map[string]string `json:"ingressAnnotations,omitempty"` + // TlsHosts is useful If we need to declare SSL certificates to the ingress object + // +optional + IngressTLS []networkingv1.IngressTLS `json:"ingressTLS,omitempty"` +} + +// DriverIngressConfiguration is for driver ingress specific configuration parameters. +type DriverIngressConfiguration struct { + // ServicePort allows configuring the port at service level that might be different from the targetPort. + ServicePort *int32 `json:"servicePort"` + // ServicePortName allows configuring the name of the service port. + // This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. + ServicePortName *string `json:"servicePortName"` + // ServiceType allows configuring the type of the service. Defaults to ClusterIP. 
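A short sketch of the OnFailure shape defined by RestartPolicy above; the counts and intervals are illustrative, and both retry intervals are in seconds:

    package main

    import (
    	"fmt"

    	"github.com/kubeflow/spark-operator/api/v1beta2"
    )

    func main() {
    	retries, interval := int32(3), int64(10)
    	rp := v1beta2.RestartPolicy{
    		Type:                             v1beta2.RestartPolicyOnFailure,
    		OnFailureRetries:                 &retries,
    		OnFailureRetryInterval:           &interval,
    		OnSubmissionFailureRetries:       &retries,
    		OnSubmissionFailureRetryInterval: &interval,
    	}
    	fmt.Printf("%s: %d retries, every %ds\n", rp.Type, *rp.OnFailureRetries, *rp.OnFailureRetryInterval)
    }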
+ // +optional + ServiceType *corev1.ServiceType `json:"serviceType,omitempty"` + // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object. + // +optional + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + // ServiceLabels is a map of key,value pairs of labels that might be added to the service object. + // +optional + ServiceLabels map[string]string `json:"serviceLabels,omitempty"` + // IngressURLFormat is the URL for the ingress. + IngressURLFormat string `json:"ingressURLFormat,omitempty"` + // IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object. i.e. specify nginx as ingress.class + // +optional + IngressAnnotations map[string]string `json:"ingressAnnotations,omitempty"` + // TlsHosts is useful If we need to declare SSL certificates to the ingress object + // +optional + IngressTLS []networkingv1.IngressTLS `json:"ingressTLS,omitempty"` +} + +// ApplicationStateType represents the type of the current state of an application. +type ApplicationStateType string + +// Different states an application may have. +const ( + ApplicationStateNew ApplicationStateType = "" + ApplicationStateSubmitted ApplicationStateType = "SUBMITTED" + ApplicationStateRunning ApplicationStateType = "RUNNING" + ApplicationStateCompleted ApplicationStateType = "COMPLETED" + ApplicationStateFailed ApplicationStateType = "FAILED" + ApplicationStateFailedSubmission ApplicationStateType = "SUBMISSION_FAILED" + ApplicationStatePendingRerun ApplicationStateType = "PENDING_RERUN" + ApplicationStateInvalidating ApplicationStateType = "INVALIDATING" + ApplicationStateSucceeding ApplicationStateType = "SUCCEEDING" + ApplicationStateFailing ApplicationStateType = "FAILING" + ApplicationStateSuspending ApplicationStateType = "SUSPENDING" + ApplicationStateSuspended ApplicationStateType = "SUSPENDED" + ApplicationStateResuming ApplicationStateType = "RESUMING" + ApplicationStateUnknown ApplicationStateType = "UNKNOWN" +) + +// ApplicationState tells the current state of the application and an error message in case of failures. +type ApplicationState struct { + State ApplicationStateType `json:"state"` + ErrorMessage string `json:"errorMessage,omitempty"` +} + +// DriverState tells the current state of a spark driver. +type DriverState string + +// Different states a spark driver may have. +const ( + DriverStatePending DriverState = "PENDING" + DriverStateRunning DriverState = "RUNNING" + DriverStateCompleted DriverState = "COMPLETED" + DriverStateFailed DriverState = "FAILED" + DriverStateUnknown DriverState = "UNKNOWN" +) + +// ExecutorState tells the current state of an executor. +type ExecutorState string + +// Different states an executor may have. +const ( + ExecutorStatePending ExecutorState = "PENDING" + ExecutorStateRunning ExecutorState = "RUNNING" + ExecutorStateCompleted ExecutorState = "COMPLETED" + ExecutorStateFailed ExecutorState = "FAILED" + ExecutorStateUnknown ExecutorState = "UNKNOWN" +) + +// Dependencies specifies all possible types of dependencies of a Spark application. +type Dependencies struct { + // Jars is a list of JAR files the Spark application depends on. + // +optional + Jars []string `json:"jars,omitempty"` + // Files is a list of files the Spark application depends on. + // +optional + Files []string `json:"files,omitempty"` + // PyFiles is a list of Python files the Spark application depends on. 
+ // +optional + PyFiles []string `json:"pyFiles,omitempty"` + // Packages is a list of maven coordinates of jars to include on the driver and executor + // classpaths. This will search the local maven repo, then maven central and any additional + // remote repositories given by the "repositories" option. + // Each package should be of the form "groupId:artifactId:version". + // +optional + Packages []string `json:"packages,omitempty"` + // ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the + // dependencies provided in Packages to avoid dependency conflicts. + // +optional + ExcludePackages []string `json:"excludePackages,omitempty"` + // Repositories is a list of additional remote repositories to search for the maven coordinate + // given with the "packages" option. + // +optional + Repositories []string `json:"repositories,omitempty"` + // Archives is a list of archives to be extracted into the working directory of each executor. + // +optional + Archives []string `json:"archives,omitempty"` +} + +// SparkPodSpec defines common things that can be customized for a Spark driver or executor pod. +// TODO: investigate if we should use v1.PodSpec and limit what can be set instead. +type SparkPodSpec struct { + // Template is a pod template that can be used to define the driver or executor pod configurations that Spark configurations do not support. + // Spark version >= 3.0.0 is required. + // Ref: https://spark.apache.org/docs/latest/running-on-kubernetes.html#pod-template. + // +optional + // +kubebuilder:validation:Schemaless + // +kubebuilder:validation:Type:=object + // +kubebuilder:pruning:PreserveUnknownFields + Template *corev1.PodTemplateSpec `json:"template,omitempty"` + // Cores maps to `spark.driver.cores` or `spark.executor.cores` for the driver and executors, respectively. + // +optional + // +kubebuilder:validation:Minimum=1 + Cores *int32 `json:"cores,omitempty"` + // CoreLimit specifies a hard limit on CPU cores for the pod. + // Optional + CoreLimit *string `json:"coreLimit,omitempty"` + // Memory is the amount of memory to request for the pod. + // +optional + Memory *string `json:"memory,omitempty"` + // MemoryOverhead is the amount of off-heap memory to allocate in cluster mode, in MiB unless otherwise specified. + // +optional + MemoryOverhead *string `json:"memoryOverhead,omitempty"` + // GPU specifies GPU requirement for the pod. + // +optional + GPU *GPUSpec `json:"gpu,omitempty"` + // Image is the container image to use. Overrides Spec.Image if set. + // +optional + Image *string `json:"image,omitempty"` + // ConfigMaps carries information of other ConfigMaps to add to the pod. + // +optional + ConfigMaps []NamePath `json:"configMaps,omitempty"` + // Secrets carries information of secrets to add to the pod. + // +optional + Secrets []SecretInfo `json:"secrets,omitempty"` + // Env carries the environment variables to add to the pod. + // +optional + Env []corev1.EnvVar `json:"env,omitempty"` + // EnvVars carries the environment variables to add to the pod. + // Deprecated. Consider using `env` instead. + // +optional + EnvVars map[string]string `json:"envVars,omitempty"` + // EnvFrom is a list of sources to populate environment variables in the container. + // +optional + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` + // EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + // Deprecated. Consider using `env` instead. 
+ // +optional
+ EnvSecretKeyRefs map[string]NameKey `json:"envSecretKeyRefs,omitempty"`
+ // Labels are the Kubernetes labels to be added to the pod.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+ // Annotations are the Kubernetes annotations to be added to the pod.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem.
+ // +optional
+ VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
+ // Affinity specifies the affinity/anti-affinity settings for the pod.
+ // +optional
+ Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod.
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ // PodSecurityContext specifies the PodSecurityContext to apply.
+ // +optional
+ PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"`
+ // SecurityContext specifies the container's SecurityContext to apply.
+ // +optional
+ SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
+ // SchedulerName specifies the scheduler that will be used for scheduling the pod.
+ // +optional
+ SchedulerName *string `json:"schedulerName,omitempty"`
+ // Sidecars is a list of sidecar containers that run alongside the main Spark container.
+ // +optional
+ Sidecars []corev1.Container `json:"sidecars,omitempty"`
+ // InitContainers is a list of init-containers that run to completion before the main Spark container.
+ // +optional
+ InitContainers []corev1.Container `json:"initContainers,omitempty"`
+ // HostNetwork indicates whether to request host networking for the pod or not.
+ // +optional
+ HostNetwork *bool `json:"hostNetwork,omitempty"`
+ // NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
+ // This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated).
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ // DNSConfig specifies the DNS settings for the pod, following the Kubernetes specifications.
+ // +optional
+ DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
+ // TerminationGracePeriodSeconds is the termination grace period, in seconds, for the pod.
+ // +optional
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+ // ServiceAccount is the name of the custom Kubernetes service account used by the pod.
+ // +optional
+ ServiceAccount *string `json:"serviceAccount,omitempty"`
+ // HostAliases settings for the pod, following the Kubernetes specifications.
+ // +optional
+ HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
+ // ShareProcessNamespace settings for the pod, following the Kubernetes specifications.
+ // +optional
+ ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty"`
+}
+
+// DriverSpec is the specification of the driver.
+type DriverSpec struct {
+ SparkPodSpec `json:",inline"`
+ // PodName is the name of the driver pod that the user creates. This is used for the
+ // in-cluster client mode in which the user creates a client pod where the driver of
+ // the user application runs. It's an error to set this field if Mode is not
+ // in-cluster-client.
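+ // Per the validation pattern below, the name must be a valid DNS-1123
+ // subdomain, e.g. "my-client-driver" (illustrative).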
+ // +optional
+ // +kubebuilder:validation:Pattern=[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+ PodName *string `json:"podName,omitempty"`
+ // CoreRequest is the physical CPU core request for the driver.
+ // Maps to `spark.kubernetes.driver.request.cores`, which is available since Spark 3.0.
+ // +optional
+ CoreRequest *string `json:"coreRequest,omitempty"`
+ // JavaOptions is a string of extra JVM options to pass to the driver. For instance,
+ // GC settings or other logging.
+ // +optional
+ JavaOptions *string `json:"javaOptions,omitempty"`
+ // Lifecycle for running preStop or postStart commands.
+ // +optional
+ Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"`
+ // KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and
+ // other Kubernetes resources. Defaults to https://kubernetes.default.svc.
+ // +optional
+ KubernetesMaster *string `json:"kubernetesMaster,omitempty"`
+ // ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by
+ // executors to connect to the driver.
+ // +optional
+ ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
+ // ServiceLabels defines the labels to be added to the Kubernetes headless service used by
+ // executors to connect to the driver.
+ // +optional
+ ServiceLabels map[string]string `json:"serviceLabels,omitempty"`
+ // Ports settings for the pods, following the Kubernetes specifications.
+ // +optional
+ Ports []Port `json:"ports,omitempty"`
+ // PriorityClassName is the name of the PriorityClass for the driver pod.
+ // +optional
+ PriorityClassName *string `json:"priorityClassName,omitempty"`
+}
+
+// ExecutorSpec is the specification of the executor.
+type ExecutorSpec struct {
+ SparkPodSpec `json:",inline"`
+ // Instances is the number of executor instances.
+ // +optional
+ // +kubebuilder:validation:Minimum=1
+ Instances *int32 `json:"instances,omitempty"`
+ // CoreRequest is the physical CPU core request for the executors.
+ // Maps to `spark.kubernetes.executor.request.cores`, which is available since Spark 2.4.
+ // +optional
+ CoreRequest *string `json:"coreRequest,omitempty"`
+ // JavaOptions is a string of extra JVM options to pass to the executors. For instance,
+ // GC settings or other logging.
+ // +optional
+ JavaOptions *string `json:"javaOptions,omitempty"`
+ // Lifecycle for running preStop or postStart commands.
+ // +optional
+ Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"`
+ // DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination.
+ // Maps to `spark.kubernetes.executor.deleteOnTermination`, which is available since Spark 3.0.
+ // +optional
+ DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"`
+ // Ports settings for the pods, following the Kubernetes specifications.
+ // +optional
+ Ports []Port `json:"ports,omitempty"`
+ // PriorityClassName is the name of the PriorityClass for the executor pod.
+ // +optional
+ PriorityClassName *string `json:"priorityClassName,omitempty"`
+}
+
+// NamePath is a pair of a name and a path to which the named objects should be mounted.
+type NamePath struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+}
+
+// SecretType tells the type of a secret.
+type SecretType string
+
+// An enumeration of secret types supported.
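+// An illustrative `secrets` entry that uses one of these types
+// (the name and path are hypothetical):
+//
+//	secrets:
+//	- name: gcp-svc-account
+//	  path: /mnt/secrets
+//	  secretType: GCPServiceAccount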
+const (
+ // SecretTypeGCPServiceAccount is for secrets from a GCP service account JSON key file that need
+ // the environment variable GOOGLE_APPLICATION_CREDENTIALS.
+ SecretTypeGCPServiceAccount SecretType = "GCPServiceAccount"
+ // SecretTypeHadoopDelegationToken is for secrets from a Hadoop delegation token that need the
+ // environment variable HADOOP_TOKEN_FILE_LOCATION.
+ SecretTypeHadoopDelegationToken SecretType = "HadoopDelegationToken"
+ // SecretTypeGeneric is for secrets that need no special handling.
+ SecretTypeGeneric SecretType = "Generic"
+)
+
+// DriverInfo captures information about the driver.
+type DriverInfo struct {
+ WebUIServiceName string `json:"webUIServiceName,omitempty"`
+ // UI details for the UI created via a ClusterIP service, accessible from within the cluster.
+ WebUIAddress string `json:"webUIAddress,omitempty"`
+ WebUIPort int32 `json:"webUIPort,omitempty"`
+ // Ingress details if an ingress for the UI was created.
+ WebUIIngressName string `json:"webUIIngressName,omitempty"`
+ WebUIIngressAddress string `json:"webUIIngressAddress,omitempty"`
+ PodName string `json:"podName,omitempty"`
+}
+
+// SecretInfo captures information of a secret.
+type SecretInfo struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ Type SecretType `json:"secretType"`
+}
+
+// NameKey represents the name and key of a SecretKeyRef.
+type NameKey struct {
+ Name string `json:"name"`
+ Key string `json:"key"`
+}
+
+// Port represents the port definition in the pod objects.
+type Port struct {
+ Name string `json:"name"`
+ Protocol string `json:"protocol"`
+ ContainerPort int32 `json:"containerPort"`
+}
+
+// MonitoringSpec defines the monitoring specification.
+type MonitoringSpec struct {
+ // ExposeDriverMetrics specifies whether to expose metrics on the driver.
+ ExposeDriverMetrics bool `json:"exposeDriverMetrics"`
+ // ExposeExecutorMetrics specifies whether to expose metrics on the executors.
+ ExposeExecutorMetrics bool `json:"exposeExecutorMetrics"`
+ // MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system.
+ // If not specified, the content in spark-docker/conf/metrics.properties will be used.
+ // +optional
+ MetricsProperties *string `json:"metricsProperties,omitempty"`
+ // MetricsPropertiesFile is the container local path of file metrics.properties for configuring
+ // the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used.
+ // +optional
+ MetricsPropertiesFile *string `json:"metricsPropertiesFile,omitempty"`
+ // Prometheus is for configuring the Prometheus JMX exporter.
+ // +optional
+ Prometheus *PrometheusSpec `json:"prometheus,omitempty"`
+}
+
+// PrometheusSpec defines the Prometheus specification when Prometheus is to be used for
+// collecting and exposing metrics.
+type PrometheusSpec struct {
+ // JmxExporterJar is the path to the Prometheus JMX exporter jar in the container.
+ JmxExporterJar string `json:"jmxExporterJar"`
+ // Port is the port of the HTTP server run by the Prometheus JMX exporter.
+ // If not specified, 8090 will be used as the default.
+ // +kubebuilder:validation:Minimum=1024
+ // +kubebuilder:validation:Maximum=49151
+ // +optional
+ Port *int32 `json:"port,omitempty"`
+ // PortName is the name of the Prometheus JMX exporter port.
+ // If not specified, jmx-exporter will be used as the default.
+ // +optional + PortName *string `json:"portName,omitempty"` + // ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + // ConfigFile takes precedence over Configuration, which is shown below. + // +optional + ConfigFile *string `json:"configFile,omitempty"` + // Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + // If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + // Configuration has no effect if ConfigFile is set. + // +optional + Configuration *string `json:"configuration,omitempty"` +} + +type GPUSpec struct { + // Name is GPU resource name, such as: nvidia.com/gpu or amd.com/gpu + Name string `json:"name"` + // Quantity is the number of GPUs to request for driver or executor. + Quantity int64 `json:"quantity"` +} + +// DynamicAllocation contains configuration options for dynamic allocation. +type DynamicAllocation struct { + // Enabled controls whether dynamic allocation is enabled or not. + Enabled bool `json:"enabled,omitempty"` + // InitialExecutors is the initial number of executors to request. If .spec.executor.instances + // is also set, the initial number of executors is set to the bigger of that and this option. + // +optional + InitialExecutors *int32 `json:"initialExecutors,omitempty"` + // MinExecutors is the lower bound for the number of executors if dynamic allocation is enabled. + // +optional + MinExecutors *int32 `json:"minExecutors,omitempty"` + // MaxExecutors is the upper bound for the number of executors if dynamic allocation is enabled. + // +optional + MaxExecutors *int32 `json:"maxExecutors,omitempty"` + // ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + // shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + // +optional + ShuffleTrackingTimeout *int64 `json:"shuffleTrackingTimeout,omitempty"` +} diff --git a/vendor/github.com/kubeflow/spark-operator/api/v1beta2/zz_generated.deepcopy.go b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..635e19af58 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/api/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1121 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationState) DeepCopyInto(out *ApplicationState) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState. 
+func (in *ApplicationState) DeepCopy() *ApplicationState { + if in == nil { + return nil + } + out := new(ApplicationState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BatchSchedulerConfiguration) DeepCopyInto(out *BatchSchedulerConfiguration) { + *out = *in + if in.Queue != nil { + in, out := &in.Queue, &out.Queue + *out = new(string) + **out = **in + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchSchedulerConfiguration. +func (in *BatchSchedulerConfiguration) DeepCopy() *BatchSchedulerConfiguration { + if in == nil { + return nil + } + out := new(BatchSchedulerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dependencies) DeepCopyInto(out *Dependencies) { + *out = *in + if in.Jars != nil { + in, out := &in.Jars, &out.Jars + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PyFiles != nil { + in, out := &in.PyFiles, &out.PyFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Packages != nil { + in, out := &in.Packages, &out.Packages + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludePackages != nil { + in, out := &in.ExcludePackages, &out.ExcludePackages + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Repositories != nil { + in, out := &in.Repositories, &out.Repositories + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Archives != nil { + in, out := &in.Archives, &out.Archives + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies. +func (in *Dependencies) DeepCopy() *Dependencies { + if in == nil { + return nil + } + out := new(Dependencies) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverInfo) DeepCopyInto(out *DriverInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo. +func (in *DriverInfo) DeepCopy() *DriverInfo { + if in == nil { + return nil + } + out := new(DriverInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) { + *out = *in + if in.ServicePort != nil { + in, out := &in.ServicePort, &out.ServicePort + *out = new(int32) + **out = **in + } + if in.ServicePortName != nil { + in, out := &in.ServicePortName, &out.ServicePortName + *out = new(string) + **out = **in + } + if in.ServiceType != nil { + in, out := &in.ServiceType, &out.ServiceType + *out = new(v1.ServiceType) + **out = **in + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressAnnotations != nil { + in, out := &in.IngressAnnotations, &out.IngressAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressTLS != nil { + in, out := &in.IngressTLS, &out.IngressTLS + *out = make([]networkingv1.IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration. +func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration { + if in == nil { + return nil + } + out := new(DriverIngressConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { + *out = *in + in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec) + if in.PodName != nil { + in, out := &in.PodName, &out.PodName + *out = new(string) + **out = **in + } + if in.CoreRequest != nil { + in, out := &in.CoreRequest, &out.CoreRequest + *out = new(string) + **out = **in + } + if in.JavaOptions != nil { + in, out := &in.JavaOptions, &out.JavaOptions + *out = new(string) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.KubernetesMaster != nil { + in, out := &in.KubernetesMaster, &out.KubernetesMaster + *out = new(string) + **out = **in + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]Port, len(*in)) + copy(*out, *in) + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. +func (in *DriverSpec) DeepCopy() *DriverSpec { + if in == nil { + return nil + } + out := new(DriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamicAllocation) DeepCopyInto(out *DynamicAllocation) { + *out = *in + if in.InitialExecutors != nil { + in, out := &in.InitialExecutors, &out.InitialExecutors + *out = new(int32) + **out = **in + } + if in.MinExecutors != nil { + in, out := &in.MinExecutors, &out.MinExecutors + *out = new(int32) + **out = **in + } + if in.MaxExecutors != nil { + in, out := &in.MaxExecutors, &out.MaxExecutors + *out = new(int32) + **out = **in + } + if in.ShuffleTrackingTimeout != nil { + in, out := &in.ShuffleTrackingTimeout, &out.ShuffleTrackingTimeout + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicAllocation. +func (in *DynamicAllocation) DeepCopy() *DynamicAllocation { + if in == nil { + return nil + } + out := new(DynamicAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { + *out = *in + in.SparkPodSpec.DeepCopyInto(&out.SparkPodSpec) + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.CoreRequest != nil { + in, out := &in.CoreRequest, &out.CoreRequest + *out = new(string) + **out = **in + } + if in.JavaOptions != nil { + in, out := &in.JavaOptions, &out.JavaOptions + *out = new(string) + **out = **in + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]Port, len(*in)) + copy(*out, *in) + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec. +func (in *ExecutorSpec) DeepCopy() *ExecutorSpec { + if in == nil { + return nil + } + out := new(ExecutorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GPUSpec) DeepCopyInto(out *GPUSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec. +func (in *GPUSpec) DeepCopy() *GPUSpec { + if in == nil { + return nil + } + out := new(GPUSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { + *out = *in + if in.MetricsProperties != nil { + in, out := &in.MetricsProperties, &out.MetricsProperties + *out = new(string) + **out = **in + } + if in.MetricsPropertiesFile != nil { + in, out := &in.MetricsPropertiesFile, &out.MetricsPropertiesFile + *out = new(string) + **out = **in + } + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(PrometheusSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. 
+func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { + if in == nil { + return nil + } + out := new(MonitoringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameKey) DeepCopyInto(out *NameKey) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey. +func (in *NameKey) DeepCopy() *NameKey { + if in == nil { + return nil + } + out := new(NameKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamePath) DeepCopyInto(out *NamePath) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath. +func (in *NamePath) DeepCopy() *NamePath { + if in == nil { + return nil + } + out := new(NamePath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Port) DeepCopyInto(out *Port) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. +func (in *Port) DeepCopy() *Port { + if in == nil { + return nil + } + out := new(Port) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.PortName != nil { + in, out := &in.PortName, &out.PortName + *out = new(string) + **out = **in + } + if in.ConfigFile != nil { + in, out := &in.ConfigFile, &out.ConfigFile + *out = new(string) + **out = **in + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. +func (in *PrometheusSpec) DeepCopy() *PrometheusSpec { + if in == nil { + return nil + } + out := new(PrometheusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) { + *out = *in + if in.OnSubmissionFailureRetries != nil { + in, out := &in.OnSubmissionFailureRetries, &out.OnSubmissionFailureRetries + *out = new(int32) + **out = **in + } + if in.OnFailureRetries != nil { + in, out := &in.OnFailureRetries, &out.OnFailureRetries + *out = new(int32) + **out = **in + } + if in.OnSubmissionFailureRetryInterval != nil { + in, out := &in.OnSubmissionFailureRetryInterval, &out.OnSubmissionFailureRetryInterval + *out = new(int64) + **out = **in + } + if in.OnFailureRetryInterval != nil { + in, out := &in.OnFailureRetryInterval, &out.OnFailureRetryInterval + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy. +func (in *RestartPolicy) DeepCopy() *RestartPolicy { + if in == nil { + return nil + } + out := new(RestartPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication. +func (in *ScheduledSparkApplication) DeepCopy() *ScheduledSparkApplication { + if in == nil { + return nil + } + out := new(ScheduledSparkApplication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduledSparkApplication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScheduledSparkApplication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList. +func (in *ScheduledSparkApplicationList) DeepCopy() *ScheduledSparkApplicationList { + if in == nil { + return nil + } + out := new(ScheduledSparkApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduledSparkApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplicationSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if in.SuccessfulRunHistoryLimit != nil { + in, out := &in.SuccessfulRunHistoryLimit, &out.SuccessfulRunHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedRunHistoryLimit != nil { + in, out := &in.FailedRunHistoryLimit, &out.FailedRunHistoryLimit + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec. +func (in *ScheduledSparkApplicationSpec) DeepCopy() *ScheduledSparkApplicationSpec { + if in == nil { + return nil + } + out := new(ScheduledSparkApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkApplicationStatus) { + *out = *in + in.LastRun.DeepCopyInto(&out.LastRun) + in.NextRun.DeepCopyInto(&out.NextRun) + if in.PastSuccessfulRunNames != nil { + in, out := &in.PastSuccessfulRunNames, &out.PastSuccessfulRunNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PastFailedRunNames != nil { + in, out := &in.PastFailedRunNames, &out.PastFailedRunNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus. +func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplicationStatus { + if in == nil { + return nil + } + out := new(ScheduledSparkApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretInfo) DeepCopyInto(out *SecretInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo. +func (in *SecretInfo) DeepCopy() *SecretInfo { + if in == nil { + return nil + } + out := new(SecretInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkApplication) DeepCopyInto(out *SparkApplication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication. +func (in *SparkApplication) DeepCopy() *SparkApplication { + if in == nil { + return nil + } + out := new(SparkApplication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkApplication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SparkApplication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList. +func (in *SparkApplicationList) DeepCopy() *SparkApplicationList { + if in == nil { + return nil + } + out := new(SparkApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) {
+ *out = *in
+ if in.ProxyUser != nil {
+ in, out := &in.ProxyUser, &out.ProxyUser
+ *out = new(string)
+ **out = **in
+ }
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImagePullPolicy != nil {
+ in, out := &in.ImagePullPolicy, &out.ImagePullPolicy
+ *out = new(string)
+ **out = **in
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.MainClass != nil {
+ in, out := &in.MainClass, &out.MainClass
+ *out = new(string)
+ **out = **in
+ }
+ if in.MainApplicationFile != nil {
+ in, out := &in.MainApplicationFile, &out.MainApplicationFile
+ *out = new(string)
+ **out = **in
+ }
+ if in.Arguments != nil {
+ in, out := &in.Arguments, &out.Arguments
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SparkConf != nil {
+ in, out := &in.SparkConf, &out.SparkConf
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.HadoopConf != nil {
+ in, out := &in.HadoopConf, &out.HadoopConf
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SparkConfigMap != nil {
+ in, out := &in.SparkConfigMap, &out.SparkConfigMap
+ *out = new(string)
+ **out = **in
+ }
+ if in.HadoopConfigMap != nil {
+ in, out := &in.HadoopConfigMap, &out.HadoopConfigMap
+ *out = new(string)
+ **out = **in
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]v1.Volume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Driver.DeepCopyInto(&out.Driver)
+ in.Executor.DeepCopyInto(&out.Executor)
+ in.Deps.DeepCopyInto(&out.Deps)
+ in.RestartPolicy.DeepCopyInto(&out.RestartPolicy)
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.FailureRetries != nil {
+ in, out := &in.FailureRetries, &out.FailureRetries
+ *out = new(int32)
+ **out = **in
+ }
+ if in.RetryInterval != nil {
+ in, out := &in.RetryInterval, &out.RetryInterval
+ *out = new(int64)
+ **out = **in
+ }
+ if in.PythonVersion != nil {
+ in, out := &in.PythonVersion, &out.PythonVersion
+ *out = new(string)
+ **out = **in
+ }
+ if in.MemoryOverheadFactor != nil {
+ in, out := &in.MemoryOverheadFactor, &out.MemoryOverheadFactor
+ *out = new(string)
+ **out = **in
+ }
+ if in.Monitoring != nil {
+ in, out := &in.Monitoring, &out.Monitoring
+ *out = new(MonitoringSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BatchScheduler != nil {
+ in, out := &in.BatchScheduler, &out.BatchScheduler
+ *out = new(string)
+ **out = **in
+ }
+ if in.TimeToLiveSeconds != nil {
+ in, out := &in.TimeToLiveSeconds, &out.TimeToLiveSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.BatchSchedulerOptions != nil {
+ in, out := &in.BatchSchedulerOptions, &out.BatchSchedulerOptions
+ *out = new(BatchSchedulerConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SparkUIOptions != nil {
+ in, out := &in.SparkUIOptions, &out.SparkUIOptions
+ *out = new(SparkUIConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DriverIngressOptions != nil {
+ in, out := &in.DriverIngressOptions, &out.DriverIngressOptions
+ *out = make([]DriverIngressConfiguration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DynamicAllocation != nil {
+ in, out := &in.DynamicAllocation, &out.DynamicAllocation
+ *out = new(DynamicAllocation)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec.
+func (in *SparkApplicationSpec) DeepCopy() *SparkApplicationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SparkApplicationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) {
+ *out = *in
+ in.LastSubmissionAttemptTime.DeepCopyInto(&out.LastSubmissionAttemptTime)
+ in.TerminationTime.DeepCopyInto(&out.TerminationTime)
+ out.DriverInfo = in.DriverInfo
+ out.AppState = in.AppState
+ if in.ExecutorState != nil {
+ in, out := &in.ExecutorState, &out.ExecutorState
+ *out = make(map[string]ExecutorState, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus.
+func (in *SparkApplicationStatus) DeepCopy() *SparkApplicationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SparkApplicationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) {
+ *out = *in
+ if in.Template != nil {
+ in, out := &in.Template, &out.Template
+ *out = new(v1.PodTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Cores != nil {
+ in, out := &in.Cores, &out.Cores
+ *out = new(int32)
+ **out = **in
+ }
+ if in.CoreLimit != nil {
+ in, out := &in.CoreLimit, &out.CoreLimit
+ *out = new(string)
+ **out = **in
+ }
+ if in.Memory != nil {
+ in, out := &in.Memory, &out.Memory
+ *out = new(string)
+ **out = **in
+ }
+ if in.MemoryOverhead != nil {
+ in, out := &in.MemoryOverhead, &out.MemoryOverhead
+ *out = new(string)
+ **out = **in
+ }
+ if in.GPU != nil {
+ in, out := &in.GPU, &out.GPU
+ *out = new(GPUSpec)
+ **out = **in
+ }
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(string)
+ **out = **in
+ }
+ if in.ConfigMaps != nil {
+ in, out := &in.ConfigMaps, &out.ConfigMaps
+ *out = make([]NamePath, len(*in))
+ copy(*out, *in)
+ }
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]SecretInfo, len(*in))
+ copy(*out, *in)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]v1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.EnvVars != nil {
+ in, out := &in.EnvVars, &out.EnvVars
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.EnvFrom != nil {
+ in, out := &in.EnvFrom, &out.EnvFrom
+ *out = make([]v1.EnvFromSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.EnvSecretKeyRefs != nil {
+ in, out := &in.EnvSecretKeyRefs, &out.EnvSecretKeyRefs
+ *out = make(map[string]NameKey, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.VolumeMounts != nil {
+ in, out := &in.VolumeMounts, &out.VolumeMounts
+ *out = make([]v1.VolumeMount, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(v1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.PodSecurityContext != nil {
+ in, out := &in.PodSecurityContext, &out.PodSecurityContext
+ *out = new(v1.PodSecurityContext)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SecurityContext != nil {
+ in, out := &in.SecurityContext, &out.SecurityContext
+ *out = new(v1.SecurityContext)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SchedulerName != nil {
+ in, out := &in.SchedulerName, &out.SchedulerName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Sidecars != nil {
+ in, out := &in.Sidecars, &out.Sidecars
+ *out = make([]v1.Container, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.InitContainers != nil {
+ in, out := &in.InitContainers, &out.InitContainers
+ *out = make([]v1.Container, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.HostNetwork != nil {
+ in, out := &in.HostNetwork, &out.HostNetwork
+ *out = new(bool)
+ **out = **in
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.DNSConfig != nil {
+ in, out := &in.DNSConfig, &out.DNSConfig
+ *out = new(v1.PodDNSConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TerminationGracePeriodSeconds != nil {
+ in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ServiceAccount != nil {
+ in, out := &in.ServiceAccount, &out.ServiceAccount
+ *out = new(string)
+ **out = **in
+ }
+ if in.HostAliases != nil {
+ in, out := &in.HostAliases, &out.HostAliases
+ *out = make([]v1.HostAlias, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ShareProcessNamespace != nil {
+ in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec.
+func (in *SparkPodSpec) DeepCopy() *SparkPodSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SparkPodSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SparkUIConfiguration) DeepCopyInto(out *SparkUIConfiguration) { + *out = *in + if in.ServicePort != nil { + in, out := &in.ServicePort, &out.ServicePort + *out = new(int32) + **out = **in + } + if in.ServicePortName != nil { + in, out := &in.ServicePortName, &out.ServicePortName + *out = new(string) + **out = **in + } + if in.ServiceType != nil { + in, out := &in.ServiceType, &out.ServiceType + *out = new(v1.ServiceType) + **out = **in + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressAnnotations != nil { + in, out := &in.IngressAnnotations, &out.IngressAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressTLS != nil { + in, out := &in.IngressTLS, &out.IngressTLS + *out = make([]networkingv1.IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkUIConfiguration. +func (in *SparkUIConfiguration) DeepCopy() *SparkUIConfiguration { + if in == nil { + return nil + } + out := new(SparkUIConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/common/constants.go b/vendor/github.com/kubeflow/spark-operator/pkg/common/constants.go new file mode 100644 index 0000000000..59ffc87081 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/common/constants.go @@ -0,0 +1,50 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +const ( + ErrorCodePodAlreadyExists = "code=409" +) + +const ( + SparkApplicationFinalizerName = "sparkoperator.k8s.io/finalizer" + ScheduledSparkApplicationFinalizerName = "sparkoperator.k8s.io/finalizer" +) + +const ( + RSAKeySize = 2048 +) + +const ( + CAKeyPem = "ca-key.pem" + CACertPem = "ca-cert.pem" + ServerKeyPem = "server-key.pem" + ServerCertPem = "server-cert.pem" +) + +// Kubernetes volume types. +const ( + VolumeTypeEmptyDir = "emptyDir" + VolumeTypeHostPath = "hostPath" + VolumeTypeNFS = "nfs" + VolumeTypePersistentVolumeClaim = "persistentVolumeClaim" +) + +const ( + // Epsilon is a small number used to compare 64 bit floating point numbers. + Epsilon = 1e-9 +) diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/common/doc.go b/vendor/github.com/kubeflow/spark-operator/pkg/common/doc.go new file mode 100644 index 0000000000..4ff7b522bc --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/common/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package common contains code that deals with configuration of Spark driver and executor pods, e.g., mounting
+// user-specified ConfigMaps, volumes, secrets, etc.
+package common
diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/common/event.go b/vendor/github.com/kubeflow/spark-operator/pkg/common/event.go
new file mode 100644
index 0000000000..00243c9bff
--- /dev/null
+++ b/vendor/github.com/kubeflow/spark-operator/pkg/common/event.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// SparkApplication events
+const (
+ EventSparkApplicationAdded = "SparkApplicationAdded"
+
+ EventSparkApplicationSubmitted = "SparkApplicationSubmitted"
+
+ EventSparkApplicationSubmissionFailed = "SparkApplicationSubmissionFailed"
+
+ EventSparkApplicationCompleted = "SparkApplicationCompleted"
+
+ EventSparkApplicationFailed = "SparkApplicationFailed"
+
+ EventSparkApplicationPendingRerun = "SparkApplicationPendingRerun"
+
+ EventSparkApplicationSuspending = "SparkApplicationSuspending"
+
+ EventSparkApplicationSuspended = "SparkApplicationSuspended"
+
+ EventSparkApplicationResuming = "SparkApplicationResuming"
+)
+
+// Spark driver events
+const (
+ EventSparkDriverPending = "SparkDriverPending"
+
+ EventSparkDriverRunning = "SparkDriverRunning"
+
+ EventSparkDriverCompleted = "SparkDriverCompleted"
+
+ EventSparkDriverFailed = "SparkDriverFailed"
+
+ EventSparkDriverUnknown = "SparkDriverUnknown"
+)
+
+// Spark executor events
+const (
+ EventSparkExecutorPending = "SparkExecutorPending"
+
+ EventSparkExecutorRunning = "SparkExecutorRunning"
+
+ EventSparkExecutorCompleted = "SparkExecutorCompleted"
+
+ EventSparkExecutorFailed = "SparkExecutorFailed"
+
+ EventSparkExecutorUnknown = "SparkExecutorUnknown"
+)
diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/common/metrics.go b/vendor/github.com/kubeflow/spark-operator/pkg/common/metrics.go
new file mode 100644
index 0000000000..7e38dd7ba3
--- /dev/null
+++ b/vendor/github.com/kubeflow/spark-operator/pkg/common/metrics.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Spark application metric names.
+const (
+ MetricSparkApplicationCount = "spark_application_count"
+
+ MetricSparkApplicationSubmitCount = "spark_application_submit_count"
+
+ MetricSparkApplicationFailedSubmissionCount = "spark_application_failed_submission_count"
+
+ MetricSparkApplicationRunningCount = "spark_application_running_count"
+
+ MetricSparkApplicationSuccessCount = "spark_application_success_count"
+
+ MetricSparkApplicationFailureCount = "spark_application_failure_count"
+
+ MetricSparkApplicationSuccessExecutionTimeSeconds = "spark_application_success_execution_time_seconds"
+
+ MetricSparkApplicationFailureExecutionTimeSeconds = "spark_application_failure_execution_time_seconds"
+
+ MetricSparkApplicationStartLatencySeconds = "spark_application_start_latency_seconds"
+
+ MetricSparkApplicationStartLatencySecondsHistogram = "spark_application_start_latency_seconds_histogram"
+)
+
+// Spark executor metric names.
+const (
+ MetricSparkExecutorRunningCount = "spark_executor_running_count"
+
+ MetricSparkExecutorSuccessCount = "spark_executor_success_count"
+
+ MetricSparkExecutorFailureCount = "spark_executor_failure_count"
+)
diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/common/prometheus.go b/vendor/github.com/kubeflow/spark-operator/pkg/common/prometheus.go
new file mode 100644
index 0000000000..2e141f3270
--- /dev/null
+++ b/vendor/github.com/kubeflow/spark-operator/pkg/common/prometheus.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+const (
+ // PrometheusConfigMapNameSuffix is the name suffix of the Prometheus ConfigMap.
+ PrometheusConfigMapNameSuffix = "prom-conf"
+
+ // PrometheusConfigMapMountPath is the mount path of the Prometheus ConfigMap.
+ PrometheusConfigMapMountPath = "/etc/metrics/conf"
+)
+
+const (
+ MetricsPropertiesKey = "metrics.properties"
+ PrometheusConfigKey = "prometheus.yaml"
+ PrometheusScrapeAnnotation = "prometheus.io/scrape"
+ PrometheusPortAnnotation = "prometheus.io/port"
+ PrometheusPathAnnotation = "prometheus.io/path"
+)
+
+// DefaultMetricsProperties is the default content of metrics.properties.
+const DefaultMetricsProperties = `
+*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink
+driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource`
+
+// DefaultPrometheusConfiguration is the default content of prometheus.yaml.
+const DefaultPrometheusConfiguration = ` +lowercaseOutputName: true +attrNameSnakeCase: true +rules: + - pattern: metrics<>Value + name: spark_driver_$3_$4 + type: GAUGE + labels: + app_namespace: "$1" + app_id: "$2" + - pattern: metrics<>Value + name: spark_streaming_driver_$4 + type: GAUGE + labels: + app_namespace: "$1" + app_id: "$2" + - pattern: metrics<>Value + name: spark_structured_streaming_driver_$4 + type: GAUGE + labels: + app_namespace: "$1" + app_id: "$2" + query_name: "$3" + - pattern: metrics<>Value + name: spark_executor_$4 + type: GAUGE + labels: + app_namespace: "$1" + app_id: "$2" + executor_id: "$3" + - pattern: metrics<>Count + name: spark_driver_DAGScheduler_$3_count + type: COUNTER + labels: + app_namespace: "$1" + app_id: "$2" + - pattern: metrics<>Count + name: spark_driver_HiveExternalCatalog_$3_count + type: COUNTER + labels: + app_namespace: "$1" + app_id: "$2" + - pattern: metrics<>Count + name: spark_driver_CodeGenerator_$3_count + type: COUNTER + labels: + app_namespace: "$1" + app_id: "$2" + - pattern: metrics<>Count + name: spark_driver_LiveListenerBus_$3_count + type: COUNTER + labels: + app_namespace: "$1" + app_id: "$2" + - pattern: metrics<>Value + name: spark_driver_LiveListenerBus_$3 + type: GAUGE + labels: + app_namespace: "$1" + app_id: "$2" + - pattern: metrics<>Count + name: spark_executor_$4_count + type: COUNTER + labels: + app_namespace: "$1" + app_id: "$2" + executor_id: "$3" + - pattern: metrics<>Value + name: spark_executor_$4_$5 + type: GAUGE + labels: + app_namespace: "$1" + app_id: "$2" + executor_id: "$3" + - pattern: metrics<>Count + name: spark_executor_HiveExternalCatalog_$4_count + type: COUNTER + labels: + app_namespace: "$1" + app_id: "$2" + executor_id: "$3" + - pattern: metrics<>Count + name: spark_executor_CodeGenerator_$4_count + type: COUNTER + labels: + app_namespace: "$1" + app_id: "$2" + executor_id: "$3" +` + +// DefaultPrometheusJavaAgentPort is the default port used by the Prometheus JMX exporter. +const DefaultPrometheusJavaAgentPort int32 = 8090 + +// DefaultPrometheusPortProtocol is the default protocol used by the Prometheus JMX exporter. +const DefaultPrometheusPortProtocol string = "TCP" + +// DefaultPrometheusPortName is the default port name used by the Prometheus JMX exporter. +const DefaultPrometheusPortName string = "jmx-exporter" diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/common/spark.go b/vendor/github.com/kubeflow/spark-operator/pkg/common/spark.go new file mode 100644 index 0000000000..94ae2c51da --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/common/spark.go @@ -0,0 +1,376 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +// Spark environment variables. +const ( + EnvSparkHome = "SPARK_HOME" + + EnvKubernetesServiceHost = "KUBERNETES_SERVICE_HOST" + + EnvKubernetesServicePort = "KUBERNETES_SERVICE_PORT" +) + +// Spark properties. +const ( + // SparkAppName is the configuration property for application name. 
+ SparkAppName = "spark.app.name"
+
+ SparkDriverCores = "spark.driver.cores"
+
+ SparkDriverMemory = "spark.driver.memory"
+
+ SparkDriverMemoryOverhead = "spark.driver.memoryOverhead"
+
+ SparkExecutorInstances = "spark.executor.instances"
+
+ SparkExecutorEnvTemplate = "spark.executor.env.%s"
+
+ SparkExecutorCores = "spark.executor.cores"
+
+ SparkExecutorMemory = "spark.executor.memory"
+
+ SparkExecutorMemoryOverhead = "spark.executor.memoryOverhead"
+
+ SparkUIProxyBase = "spark.ui.proxyBase"
+
+ SparkUIProxyRedirectURI = "spark.ui.proxyRedirectUri"
+)
+
+// Spark on Kubernetes properties.
+const (
+
+ // SparkKubernetesDriverMaster is the Spark configuration key for specifying the Kubernetes master the driver uses
+ // to manage executor pods and other Kubernetes resources.
+ SparkKubernetesDriverMaster = "spark.kubernetes.driver.master"
+
+ // SparkKubernetesNamespace is the configuration property for application namespace.
+ SparkKubernetesNamespace = "spark.kubernetes.namespace"
+
+ // SparkKubernetesContainerImage is the configuration property for specifying the unified container image.
+ SparkKubernetesContainerImage = "spark.kubernetes.container.image"
+
+ // SparkKubernetesContainerImagePullPolicy is the configuration property for specifying the container image pull policy.
+ SparkKubernetesContainerImagePullPolicy = "spark.kubernetes.container.image.pullPolicy"
+
+ // SparkKubernetesContainerImagePullSecrets is the configuration property for specifying the comma-separated list of image-pull
+ // secrets.
+ SparkKubernetesContainerImagePullSecrets = "spark.kubernetes.container.image.pullSecrets"
+
+ SparkKubernetesAllocationBatchSize = "spark.kubernetes.allocation.batch.size"
+
+ SparkKubernetesAllocationBatchDelay = "spark.kubernetes.allocation.batch.delay"
+
+ // SparkKubernetesAuthenticateDriverServiceAccountName is the Spark configuration key for specifying the name of the Kubernetes service
+ // account used by the driver pod.
+ SparkKubernetesAuthenticateDriverServiceAccountName = "spark.kubernetes.authenticate.driver.serviceAccountName"
+
+ // SparkKubernetesAuthenticateExecutorServiceAccountName is the Spark configuration key for specifying the name of the Kubernetes service
+ // account used by the executor pod.
+ SparkKubernetesAuthenticateExecutorServiceAccountName = "spark.kubernetes.authenticate.executor.serviceAccountName"
+
+ // SparkKubernetesDriverLabelTemplate is the Spark configuration key template for labels on the driver pod.
+ SparkKubernetesDriverLabelTemplate = "spark.kubernetes.driver.label.%s"
+
+ // SparkKubernetesDriverAnnotationTemplate is the Spark configuration key template for annotations on the driver pod.
+ SparkKubernetesDriverAnnotationTemplate = "spark.kubernetes.driver.annotation.%s"
+
+ // SparkKubernetesDriverServiceLabelTemplate is the key template of labels to be added to the driver service.
+ SparkKubernetesDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s"
+
+ // SparkKubernetesDriverServiceAnnotationTemplate is the key template of annotations to be added to the driver service.
+ SparkKubernetesDriverServiceAnnotationTemplate = "spark.kubernetes.driver.service.annotation.%s"
+
+ // SparkKubernetesExecutorLabelTemplate is the Spark configuration key template for labels on the executor pods.
+ SparkKubernetesExecutorLabelTemplate = "spark.kubernetes.executor.label.%s"
+
+ // SparkKubernetesExecutorAnnotationTemplate is the Spark configuration key template for annotations on the executor pods.
+ SparkKubernetesExecutorAnnotationTemplate = "spark.kubernetes.executor.annotation.%s"
+
+ // SparkKubernetesDriverPodName is the Spark configuration key for the driver pod name.
+	SparkKubernetesDriverPodName = "spark.kubernetes.driver.pod.name"
+
+	SparkKubernetesExecutorPodNamePrefix = "spark.kubernetes.executor.podNamePrefix"
+
+	// SparkKubernetesDriverRequestCores is the configuration property for specifying the physical CPU request for the driver.
+	SparkKubernetesDriverRequestCores = "spark.kubernetes.driver.request.cores"
+
+	// SparkKubernetesDriverLimitCores is the configuration property for specifying the hard CPU limit for the driver pod.
+	SparkKubernetesDriverLimitCores = "spark.kubernetes.driver.limit.cores"
+
+	// SparkKubernetesExecutorRequestCores is the configuration property for specifying the physical CPU request for executors.
+	SparkKubernetesExecutorRequestCores = "spark.kubernetes.executor.request.cores"
+
+	// SparkKubernetesExecutorLimitCores is the configuration property for specifying the hard CPU limit for the executor pods.
+	SparkKubernetesExecutorLimitCores = "spark.kubernetes.executor.limit.cores"
+
+	// SparkKubernetesNodeSelectorTemplate is the configuration property template for specifying the node selector for the pods.
+	SparkKubernetesNodeSelectorTemplate = "spark.kubernetes.node.selector.%s"
+
+	SparkKubernetesDriverNodeSelectorTemplate = "spark.kubernetes.driver.node.selector.%s"
+
+	SparkKubernetesExecutorNodeSelectorTemplate = "spark.kubernetes.executor.node.selector.%s"
+
+	// SparkKubernetesDriverEnvTemplate is the Spark configuration key template for setting environment variables
+	// into the driver.
+	SparkKubernetesDriverEnvTemplate = "spark.kubernetes.driverEnv.%s"
+
+	// SparkKubernetesDriverSecretsTemplate is the configuration property template for specifying secrets to be mounted into the
+	// driver.
+	SparkKubernetesDriverSecretsTemplate = "spark.kubernetes.driver.secrets.%s"
+
+	// SparkKubernetesExecutorSecretsTemplate is the configuration property template for specifying secrets to be mounted into the
+	// executors.
+	SparkKubernetesExecutorSecretsTemplate = "spark.kubernetes.executor.secrets.%s"
+
+	// SparkKubernetesDriverSecretKeyRefTemplate is the configuration property template for specifying environment variables
+	// from SecretKeyRefs for the driver.
+	SparkKubernetesDriverSecretKeyRefTemplate = "spark.kubernetes.driver.secretKeyRef.%s"
+
+	// SparkKubernetesExecutorSecretKeyRefTemplate is the configuration property template for specifying environment variables
+	// from SecretKeyRefs for the executors.
+	SparkKubernetesExecutorSecretKeyRefTemplate = "spark.kubernetes.executor.secretKeyRef.%s"
+
+	// SparkKubernetesDriverContainerImage is the configuration property for specifying a custom driver container image.
+	SparkKubernetesDriverContainerImage = "spark.kubernetes.driver.container.image"
+
+	// SparkKubernetesExecutorContainerImage is the configuration property for specifying a custom executor container image.
+	SparkKubernetesExecutorContainerImage = "spark.kubernetes.executor.container.image"
+
+	// SparkKubernetesDriverVolumesPrefix is the Spark volumes configuration for mounting a volume into the driver pod.
+	SparkKubernetesDriverVolumesPrefix = "spark.kubernetes.driver.volumes."
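+	// Illustrative expansion (editor's note, not upstream text): formatting the
+	// volume templates below with a volume type and name yields concrete Spark
+	// keys, e.g. fmt.Sprintf(SparkKubernetesDriverVolumesMountPathTemplate,
+	// "persistentVolumeClaim", "data") produces
+	// "spark.kubernetes.driver.volumes.persistentVolumeClaim.data.mount.path".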
+	SparkKubernetesDriverVolumesMountPathTemplate = "spark.kubernetes.driver.volumes.%s.%s.mount.path"
+	SparkKubernetesDriverVolumesMountSubPathTemplate = "spark.kubernetes.driver.volumes.%s.%s.mount.subPath"
+	SparkKubernetesDriverVolumesMountReadOnlyTemplate = "spark.kubernetes.driver.volumes.%s.%s.mount.readOnly"
+	SparkKubernetesDriverVolumesOptionsTemplate = "spark.kubernetes.driver.volumes.%s.%s.options.%s"
+
+	// SparkKubernetesExecutorVolumesPrefix is the Spark volumes configuration for mounting a volume into the executor pods.
+	SparkKubernetesExecutorVolumesPrefix = "spark.kubernetes.executor.volumes."
+	SparkKubernetesExecutorVolumesMountPathTemplate = "spark.kubernetes.executor.volumes.%s.%s.mount.path"
+	SparkKubernetesExecutorVolumesMountSubPathTemplate = "spark.kubernetes.executor.volumes.%s.%s.mount.subPath"
+	SparkKubernetesExecutorVolumesMountReadOnlyTemplate = "spark.kubernetes.executor.volumes.%s.%s.mount.readOnly"
+	SparkKubernetesExecutorVolumesOptionsTemplate = "spark.kubernetes.executor.volumes.%s.%s.options.%s"
+
+	// SparkKubernetesMemoryOverheadFactor is the Spark configuration key for specifying the memory overhead factor used for non-JVM memory.
+	SparkKubernetesMemoryOverheadFactor = "spark.kubernetes.memoryOverheadFactor"
+
+	// SparkKubernetesPysparkPythonVersion is the Spark configuration key for specifying the Python version used.
+	SparkKubernetesPysparkPythonVersion = "spark.kubernetes.pyspark.pythonVersion"
+
+	SparkKubernetesDriverPodTemplateFile = "spark.kubernetes.driver.podTemplateFile"
+
+	SparkKubernetesDriverPodTemplateContainerName = "spark.kubernetes.driver.podTemplateContainerName"
+
+	SparkKubernetesExecutorPodTemplateFile = "spark.kubernetes.executor.podTemplateFile"
+
+	SparkKubernetesExecutorPodTemplateContainerName = "spark.kubernetes.executor.podTemplateContainerName"
+
+	SparkKubernetesDriverSchedulerName = "spark.kubernetes.driver.schedulerName"
+
+	SparkKubernetesExecutorSchedulerName = "spark.kubernetes.executor.schedulerName"
+
+	// SparkExecutorEnvVarConfigKeyPrefix is the Spark configuration prefix for setting environment variables
+	// into the executor.
+	SparkExecutorEnvVarConfigKeyPrefix = "spark.executorEnv."
+
+	// SparkKubernetesInitContainerImage is the Spark configuration key for specifying a custom init-container image.
+	SparkKubernetesInitContainerImage = "spark.kubernetes.initContainer.image"
+
+	// SparkKubernetesMountDependenciesJarsDownloadDir is the Spark configuration key for specifying the download path in the driver and
+	// executors for remote jars.
+	SparkKubernetesMountDependenciesJarsDownloadDir = "spark.kubernetes.mountDependencies.jarsDownloadDir"
+
+	// SparkKubernetesMountDependenciesFilesDownloadDir is the Spark configuration key for specifying the download path in the driver and
+	// executors for remote files.
+	SparkKubernetesMountDependenciesFilesDownloadDir = "spark.kubernetes.mountDependencies.filesDownloadDir"
+
+	// SparkKubernetesMountDependenciesTimeout is the Spark configuration key for specifying the timeout in seconds of downloading
+	// remote dependencies.
+	SparkKubernetesMountDependenciesTimeout = "spark.kubernetes.mountDependencies.timeout"
+
+	// SparkKubernetesMountDependenciesMaxSimultaneousDownloads is the Spark configuration key for specifying the maximum number of remote
+	// dependencies to download simultaneously.
+ SparkKubernetesMountDependenciesMaxSimultaneousDownloads = "spark.kubernetes.mountDependencies.maxSimultaneousDownloads" + + // SparkKubernetesSubmissionWaitAppCompletion is the Spark configuration key for specifying whether to wait for application to complete. + SparkKubernetesSubmissionWaitAppCompletion = "spark.kubernetes.submission.waitAppCompletion" + + // SparkDriverExtraJavaOptions is the Spark configuration key for a string of extra JVM options to pass to driver. + SparkDriverExtraJavaOptions = "spark.driver.extraJavaOptions" + + // SparkExecutorExtraJavaOptions is the Spark configuration key for a string of extra JVM options to pass to executors. + SparkExecutorExtraJavaOptions = "spark.executor.extraJavaOptions" + + // SparkKubernetesExecutorDeleteOnTermination is the Spark configuration for specifying whether executor pods should be deleted in case of failure or normal termination. + SparkKubernetesExecutorDeleteOnTermination = "spark.kubernetes.executor.deleteOnTermination" +) + +// Dynamic allocation properties. +// Ref: https://spark.apache.org/docs/latest/configuration.html#dynamic-allocation +const ( + // SparkDynamicAllocationEnabled is the Spark configuration key for specifying if dynamic + // allocation is enabled or not. + SparkDynamicAllocationEnabled = "spark.dynamicAllocation.enabled" + + SparkDynamicAllocationExecutorIdleTimeout = "spark.dynamicAllocation.executorIdleTimeout" + + SparkDynamicAllocationCachedExecutorIdleTimeout = "spark.dynamicAllocation.cachedExecutorIdleTimeout" + + // SparkDynamicAllocationInitialExecutors is the Spark configuration key for specifying + // the initial number of executors to request if dynamic allocation is enabled. + SparkDynamicAllocationInitialExecutors = "spark.dynamicAllocation.initialExecutors" + + // SparkDynamicAllocationMaxExecutors is the Spark configuration key for specifying the + // upper bound of the number of executors to request if dynamic allocation is enabled. + SparkDynamicAllocationMaxExecutors = "spark.dynamicAllocation.maxExecutors" + + // SparkDynamicAllocationMinExecutors is the Spark configuration key for specifying the + // lower bound of the number of executors to request if dynamic allocation is enabled. + SparkDynamicAllocationMinExecutors = "spark.dynamicAllocation.minExecutors" + + SparkDynamicAllocationExecutorAllocationRatio = "spark.dynamicAllocation.executorAllocationRatio" + + SparkDynamicAllocationSchedulerBacklogTimeout = "spark.dynamicAllocation.schedulerBacklogTimeout" + + SparkDynamicAllocationSustainedSchedulerBacklogTimeout = "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout" + + // SparkDynamicAllocationShuffleTrackingEnabled is the Spark configuration key for + // specifying if shuffle data tracking is enabled. + SparkDynamicAllocationShuffleTrackingEnabled = "spark.dynamicAllocation.shuffleTracking.enabled" + + // SparkDynamicAllocationShuffleTrackingTimeout is the Spark configuration key for specifying + // the shuffle tracking timeout in milliseconds if shuffle tracking is enabled. + SparkDynamicAllocationShuffleTrackingTimeout = "spark.dynamicAllocation.shuffleTracking.timeout" +) + +const ( + // SparkRoleDriver is the value of the spark-role label for the driver. + SparkRoleDriver = "driver" + + // SparkRoleExecutor is the value of the spark-role label for the executors. + SparkRoleExecutor = "executor" +) + +const ( + // DefaultSparkConfDir is the default directory for Spark configuration files if not specified. 
+	// This directory is where the Spark ConfigMap is mounted in the driver and executor containers.
+	DefaultSparkConfDir = "/etc/spark/conf"
+
+	// SparkConfigMapVolumeName is the name of the ConfigMap volume of Spark configuration files.
+	SparkConfigMapVolumeName = "spark-configmap-volume"
+
+	// DefaultHadoopConfDir is the default directory for Hadoop configuration files if not specified.
+	// This directory is where the Hadoop ConfigMap is mounted in the driver and executor containers.
+	DefaultHadoopConfDir = "/etc/hadoop/conf"
+
+	// HadoopConfigMapVolumeName is the name of the ConfigMap volume of Hadoop configuration files.
+	HadoopConfigMapVolumeName = "hadoop-configmap-volume"
+
+	// EnvSparkConfDir is the environment variable added to the driver and executor Pods that points
+	// to the directory where the Spark ConfigMap is mounted.
+	EnvSparkConfDir = "SPARK_CONF_DIR"
+
+	// EnvHadoopConfDir is the environment variable added to the driver and executor Pods that points
+	// to the directory where the Hadoop ConfigMap is mounted.
+	EnvHadoopConfDir = "HADOOP_CONF_DIR"
+)
+
+const (
+	// LabelSparkApplicationSelector is the app ID label set by the Spark distribution on the driver/executor Pods.
+	LabelSparkApplicationSelector = "spark-app-selector"
+
+	// LabelSparkRole is the driver/executor label set by the operator/Spark distribution on the driver/executor Pods.
+	LabelSparkRole = "spark-role"
+
+	// LabelAnnotationPrefix is the prefix of every label and annotation added by the controller.
+	LabelAnnotationPrefix = "sparkoperator.k8s.io/"
+
+	// LabelSparkAppName is the name of the label for the SparkApplication object name.
+	LabelSparkAppName = LabelAnnotationPrefix + "app-name"
+
+	// LabelScheduledSparkAppName is the name of the label for the ScheduledSparkApplication object name.
+	LabelScheduledSparkAppName = LabelAnnotationPrefix + "scheduled-app-name"
+
+	// LabelLaunchedBySparkOperator is a label on Spark pods launched through the Spark Operator.
+	LabelLaunchedBySparkOperator = LabelAnnotationPrefix + "launched-by-spark-operator"
+
+	// LabelMutatedBySparkOperator is a label on Spark pods that need to be mutated by the webhook.
+	LabelMutatedBySparkOperator = LabelAnnotationPrefix + "mutated-by-spark-operator"
+
+	// LabelSubmissionID is the label that records the submission ID of the current run of an application.
+	LabelSubmissionID = LabelAnnotationPrefix + "submission-id"
+
+	// LabelSparkExecutorID is the label that records the executor pod ID.
+	LabelSparkExecutorID = "spark-exec-id"
+)
+
+const (
+	// SparkDriverContainerName is the name of the driver container in the Spark driver pod.
+	SparkDriverContainerName = "spark-kubernetes-driver"
+
+	// SparkExecutorContainerName is the name of the executor container in the Spark executor pod.
+	SparkExecutorContainerName = "executor"
+
+	// Spark3DefaultExecutorContainerName is the default executor container name in
+	// Spark 3.x, which allows the container name to be configured through the pod
+	// template support.
+	Spark3DefaultExecutorContainerName = "spark-kubernetes-executor"
+
+	// SparkLocalDirVolumePrefix is the volume name prefix for "scratch" space directory.
+ SparkLocalDirVolumePrefix = "spark-local-dir-" +) + +const ( + SparkUIPortKey = "spark.ui.port" + + DefaultSparkWebUIPort int32 = 4040 + + DefaultSparkWebUIPortName = "spark-driver-ui-port" +) + +// https://spark.apache.org/docs/latest/configuration.html +const ( + DefaultCPUMilliCores = 1000 + + DefaultMemoryBytes = 1 << 30 // 1 Gi + + DefaultJVMMemoryOverheadFactor = 0.1 + + DefaultNonJVMMemoryOverheadFactor = 0.4 + + MinMemoryOverhead = 384 * (1 << 20) // 384 Mi +) + +const ( + // EnvGoogleApplicationCredentials is the environment variable used by the + // Application Default Credentials mechanism. More details can be found at + // https://developers.google.com/identity/protocols/application-default-credentials. + EnvGoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS" + + // ServiceAccountJSONKeyFileName is the assumed name of the service account + // Json key file. This name is added to the service account secret mount path to + // form the path to the Json key file referred to by GOOGLE_APPLICATION_CREDENTIALS. + ServiceAccountJSONKeyFileName = "key.json" + + // EnvHadoopTokenFileLocation is the environment variable for specifying the location + // where the file storing the Hadoop delegation token is located. + EnvHadoopTokenFileLocation = "HADOOP_TOKEN_FILE_LOCATION" + + // HadoopDelegationTokenFileName is the assumed name of the file storing the Hadoop + // delegation token. This name is added to the delegation token secret mount path to + // form the path to the file referred to by HADOOP_TOKEN_FILE_LOCATION. + HadoopDelegationTokenFileName = "hadoop.token" +) diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/common/volcano.go b/vendor/github.com/kubeflow/spark-operator/pkg/common/volcano.go new file mode 100644 index 0000000000..cda710480d --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/common/volcano.go @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +const ( + VolcanoSchedulerName = "volcano" + + VolcanoPodGroupName = "podgroups.scheduling.volcano.sh" +) diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/capabilities.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/capabilities.go new file mode 100644 index 0000000000..068bdcb4df --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/capabilities.go @@ -0,0 +1,85 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "strings" + + "github.com/golang/glog" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" +) + +type Capabilities map[string]bool + +func (c Capabilities) Has(wanted string) bool { + return c[wanted] +} + +func (c Capabilities) String() string { + keys := make([]string, 0, len(c)) + for k := range c { + keys = append(keys, k) + } + return strings.Join(keys, ", ") +} + +var ( + IngressCapabilities Capabilities +) + +func InitializeIngressCapabilities(client kubernetes.Interface) (err error) { + if IngressCapabilities != nil { + return + } + + IngressCapabilities, err = getPreferredAvailableAPIs(client, "Ingress") + return +} + +// getPreferredAvailableAPIs queries the cluster for the preferred resources information and returns a Capabilities +// instance containing those api groups that support the specified kind. +// +// kind should be the title case singular name of the kind. For example, "Ingress" is the kind for a resource "ingress". +func getPreferredAvailableAPIs(client kubernetes.Interface, kind string) (Capabilities, error) { + discoveryclient := client.Discovery() + lists, err := discoveryclient.ServerPreferredResources() + if err != nil { + if discovery.IsGroupDiscoveryFailedError(err) { + glog.Infof("There is an orphaned API service. Server reports: %s", err) + } else { + return nil, err + } + } + + caps := Capabilities{} + for _, list := range lists { + if len(list.APIResources) == 0 { + continue + } + for _, resource := range list.APIResources { + if len(resource.Verbs) == 0 { + continue + } + if resource.Kind == kind { + caps[list.GroupVersion] = true + } + } + } + + return caps, nil +} diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/doc.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/doc.go new file mode 100644 index 0000000000..e5ae981964 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +// Package util contains utility code shared by other packages. diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/metrics.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/metrics.go new file mode 100644 index 0000000000..fe87508d9d --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/metrics.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "strings" +) + +func CreateValidMetricNameLabel(prefix, name string) string { + // "-" is not a valid character for prometheus metric names or labels. + return strings.Replace(prefix+name, "-", "_", -1) +} diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/resourcequota.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/resourcequota.go new file mode 100644 index 0000000000..1b79eea234 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/resourcequota.go @@ -0,0 +1,37 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + corev1 "k8s.io/api/core/v1" +) + +// SumResourceList sums the resource list. +func SumResourceList(lists []corev1.ResourceList) corev1.ResourceList { + total := corev1.ResourceList{} + for _, list := range lists { + for name, quantity := range list { + if value, ok := total[name]; !ok { + total[name] = quantity.DeepCopy() + } else { + value.Add(quantity) + total[name] = value + } + } + } + return total +} diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/sparkapplication.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/sparkapplication.go new file mode 100644 index 0000000000..65bdb26922 --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/sparkapplication.go @@ -0,0 +1,497 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "crypto/md5" + "fmt" + "reflect" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" +) + +// GetDriverPodName returns name of the driver pod of the given spark application. +func GetDriverPodName(app *v1beta2.SparkApplication) string { + name := app.Spec.Driver.PodName + if name != nil && len(*name) > 0 { + return *name + } + + sparkConf := app.Spec.SparkConf + if sparkConf[common.SparkKubernetesDriverPodName] != "" { + return sparkConf[common.SparkKubernetesDriverPodName] + } + + return fmt.Sprintf("%s-driver", app.Name) +} + +// GetApplicationState returns the state of the given SparkApplication. +func GetApplicationState(app *v1beta2.SparkApplication) v1beta2.ApplicationStateType { + return app.Status.AppState.State +} + +// IsTerminated returns whether the given SparkApplication is terminated. 
+func IsTerminated(app *v1beta2.SparkApplication) bool { + return app.Status.AppState.State == v1beta2.ApplicationStateCompleted || + app.Status.AppState.State == v1beta2.ApplicationStateFailed +} + +// IsExpired returns whether the given SparkApplication is expired. +func IsExpired(app *v1beta2.SparkApplication) bool { + // The application has no TTL defined and will never expire. + if app.Spec.TimeToLiveSeconds == nil { + return false + } + + ttl := time.Duration(*app.Spec.TimeToLiveSeconds) * time.Second + now := time.Now() + if !app.Status.TerminationTime.IsZero() && now.Sub(app.Status.TerminationTime.Time) > ttl { + return true + } + + return false +} + +// IsDriverRunning returns whether the driver pod of the given SparkApplication is running. +func IsDriverRunning(app *v1beta2.SparkApplication) bool { + return app.Status.AppState.State == v1beta2.ApplicationStateRunning +} + +func ShouldRetry(app *v1beta2.SparkApplication) bool { + switch app.Status.AppState.State { + case v1beta2.ApplicationStateSucceeding: + return app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways + case v1beta2.ApplicationStateFailing: + if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways { + return true + } else if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyOnFailure { + // We retry if we haven't hit the retry limit. + if app.Spec.RestartPolicy.OnFailureRetries != nil && app.Status.ExecutionAttempts <= *app.Spec.RestartPolicy.OnFailureRetries { + return true + } + } + case v1beta2.ApplicationStateFailedSubmission: + if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways { + return true + } else if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyOnFailure { + // We retry if we haven't hit the retry limit. + if app.Spec.RestartPolicy.OnSubmissionFailureRetries != nil && app.Status.SubmissionAttempts <= *app.Spec.RestartPolicy.OnSubmissionFailureRetries { + return true + } + } + } + return false +} + +func TimeUntilNextRetryDue(app *v1beta2.SparkApplication) (time.Duration, error) { + var retryInterval *int64 + switch app.Status.AppState.State { + case v1beta2.ApplicationStateFailedSubmission: + retryInterval = app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval + case v1beta2.ApplicationStateFailing: + retryInterval = app.Spec.RestartPolicy.OnFailureRetryInterval + } + + attemptsDone := app.Status.SubmissionAttempts + lastAttemptTime := app.Status.LastSubmissionAttemptTime + if retryInterval == nil || lastAttemptTime.IsZero() || attemptsDone <= 0 { + return -1, fmt.Errorf("invalid retry interval (%v), last attempt time (%v) or attemptsDone (%v)", retryInterval, lastAttemptTime, attemptsDone) + } + + // Retry wait time is attempts*RetryInterval to do a linear backoff. 
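+	// Illustrative values (editor's note, derived from the formula below, not
+	// upstream text): with a 10s retry interval and 3 attempts done, the next
+	// retry is due 30s after lastAttemptTime, so the returned duration is 30s
+	// minus the time already elapsed; a negative value means it is already due.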
+ interval := time.Duration(*retryInterval) * time.Second * time.Duration(attemptsDone) + currentTime := time.Now() + return interval - currentTime.Sub(lastAttemptTime.Time), nil +} + +func GetLocalVolumes(app *v1beta2.SparkApplication) map[string]corev1.Volume { + volumes := make(map[string]corev1.Volume) + for _, volume := range app.Spec.Volumes { + if strings.HasPrefix(volume.Name, common.SparkLocalDirVolumePrefix) { + volumes[volume.Name] = volume + } + } + return volumes +} + +func GetDriverLocalVolumeMounts(app *v1beta2.SparkApplication) []corev1.VolumeMount { + volumeMounts := []corev1.VolumeMount{} + for _, volumeMount := range app.Spec.Driver.VolumeMounts { + if strings.HasPrefix(volumeMount.Name, common.SparkLocalDirVolumePrefix) { + volumeMounts = append(volumeMounts, volumeMount) + } + } + return volumeMounts +} + +func GetExecutorLocalVolumeMounts(app *v1beta2.SparkApplication) []corev1.VolumeMount { + volumeMounts := []corev1.VolumeMount{} + for _, volumeMount := range app.Spec.Executor.VolumeMounts { + if strings.HasPrefix(volumeMount.Name, common.SparkLocalDirVolumePrefix) { + volumeMounts = append(volumeMounts, volumeMount) + } + } + return volumeMounts +} + +func generateName(name, suffix string) string { + // Some resource names are used as DNS labels, so must be 63 characters or shorter + preferredName := fmt.Sprintf("%s-%s", name, suffix) + if len(preferredName) <= 63 { + return preferredName + } + + // Truncate the name and append a hash to ensure uniqueness while staying below the limit + maxNameLength := 63 - len(suffix) - 10 // 8 for the hash, 2 for the dash + hash := fmt.Sprintf("%x", md5.Sum([]byte(preferredName))) + return fmt.Sprintf("%s-%s-%s", name[:maxNameLength], hash[:8], suffix) +} + +func GetDefaultUIServiceName(app *v1beta2.SparkApplication) string { + return generateName(app.Name, "ui-svc") +} + +func GetDefaultUIIngressName(app *v1beta2.SparkApplication) string { + return generateName(app.Name, "ui-ingress") +} + +func GetResourceLabels(app *v1beta2.SparkApplication) map[string]string { + labels := map[string]string{ + common.LabelSparkAppName: app.Name, + } + if app.Status.SubmissionID != "" { + labels[common.LabelSubmissionID] = app.Status.SubmissionID + } + return labels +} + +func GetWebUIServiceLabels(app *v1beta2.SparkApplication) map[string]string { + labels := map[string]string{} + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceLabels != nil { + for key, value := range app.Spec.SparkUIOptions.ServiceLabels { + labels[key] = value + } + } + return labels +} + +func GetWebUIServiceAnnotations(app *v1beta2.SparkApplication) map[string]string { + serviceAnnotations := map[string]string{} + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceAnnotations != nil { + for key, value := range app.Spec.SparkUIOptions.ServiceAnnotations { + serviceAnnotations[key] = value + } + } + return serviceAnnotations +} + +func GetWebUIServiceType(app *v1beta2.SparkApplication) corev1.ServiceType { + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceType != nil { + return *app.Spec.SparkUIOptions.ServiceType + } + return corev1.ServiceTypeClusterIP +} + +func GetWebUIIngressAnnotations(app *v1beta2.SparkApplication) map[string]string { + annotations := map[string]string{} + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressAnnotations != nil { + for key, value := range app.Spec.SparkUIOptions.IngressAnnotations { + annotations[key] = value + } + } + return annotations +} + +func 
GetWebUIIngressTLS(app *v1beta2.SparkApplication) []networkingv1.IngressTLS {
+	ingressTLSs := []networkingv1.IngressTLS{}
+	if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressTLS != nil {
+		ingressTLSs = append(ingressTLSs, app.Spec.SparkUIOptions.IngressTLS...)
+	}
+	return ingressTLSs
+}
+
+// GetPrometheusConfigMapName returns the name of the ConfigMap for Prometheus configuration.
+func GetPrometheusConfigMapName(app *v1beta2.SparkApplication) string {
+	return fmt.Sprintf("%s-%s", app.Name, common.PrometheusConfigMapNameSuffix)
+}
+
+// PrometheusMonitoringEnabled returns if Prometheus monitoring is enabled or not.
+func PrometheusMonitoringEnabled(app *v1beta2.SparkApplication) bool {
+	return app.Spec.Monitoring != nil && app.Spec.Monitoring.Prometheus != nil
+}
+
+// HasPrometheusConfigFile returns if Prometheus monitoring uses a configuration file in the container.
+func HasPrometheusConfigFile(app *v1beta2.SparkApplication) bool {
+	return PrometheusMonitoringEnabled(app) &&
+		app.Spec.Monitoring.Prometheus.ConfigFile != nil &&
+		*app.Spec.Monitoring.Prometheus.ConfigFile != ""
+}
+
+// HasMetricsProperties returns if the Monitoring spec defines metricsProperties.
+func HasMetricsProperties(app *v1beta2.SparkApplication) bool {
+	return PrometheusMonitoringEnabled(app) &&
+		app.Spec.Monitoring.MetricsProperties != nil &&
+		*app.Spec.Monitoring.MetricsProperties != ""
+}
+
+// HasMetricsPropertiesFile returns if the Monitoring spec defines metricsPropertiesFile.
+func HasMetricsPropertiesFile(app *v1beta2.SparkApplication) bool {
+	return PrometheusMonitoringEnabled(app) &&
+		app.Spec.Monitoring.MetricsPropertiesFile != nil &&
+		*app.Spec.Monitoring.MetricsPropertiesFile != ""
+}
+
+// ExposeDriverMetrics returns if driver metrics should be exposed.
+func ExposeDriverMetrics(app *v1beta2.SparkApplication) bool {
+	return app.Spec.Monitoring != nil && app.Spec.Monitoring.ExposeDriverMetrics
+}
+
+// ExposeExecutorMetrics returns if executor metrics should be exposed.
+func ExposeExecutorMetrics(app *v1beta2.SparkApplication) bool {
+	return app.Spec.Monitoring != nil && app.Spec.Monitoring.ExposeExecutorMetrics
+}
+
+// GetOwnerReference returns an OwnerReference pointing to the given app.
+func GetOwnerReference(app *v1beta2.SparkApplication) metav1.OwnerReference {
+	return metav1.OwnerReference{
+		APIVersion:         v1beta2.SchemeGroupVersion.String(),
+		Kind:               reflect.TypeOf(v1beta2.SparkApplication{}).Name(),
+		Name:               app.Name,
+		UID:                app.UID,
+		Controller:         BoolPtr(true),
+		BlockOwnerDeletion: BoolPtr(true),
+	}
+}
+
+// GetDriverState returns the driver state from the given driver pod.
+func GetDriverState(pod *corev1.Pod) v1beta2.DriverState {
+	switch pod.Status.Phase {
+	case corev1.PodPending:
+		return v1beta2.DriverStatePending
+	case corev1.PodRunning:
+		state := GetDriverContainerTerminatedState(pod)
+		if state != nil {
+			if state.ExitCode == 0 {
+				return v1beta2.DriverStateCompleted
+			}
+			return v1beta2.DriverStateFailed
+		}
+		return v1beta2.DriverStateRunning
+	case corev1.PodSucceeded:
+		return v1beta2.DriverStateCompleted
+	case corev1.PodFailed:
+		state := GetDriverContainerTerminatedState(pod)
+		if state != nil && state.ExitCode == 0 {
+			return v1beta2.DriverStateCompleted
+		}
+		return v1beta2.DriverStateFailed
+	default:
+		return v1beta2.DriverStateUnknown
+	}
+}
+
+// GetExecutorState returns the executor state from the given executor pod.
+func GetExecutorState(pod *corev1.Pod) v1beta2.ExecutorState {
+	switch pod.Status.Phase {
+	case corev1.PodPending:
+		return v1beta2.ExecutorStatePending
+	case corev1.PodRunning:
+		return v1beta2.ExecutorStateRunning
+	case corev1.PodSucceeded:
+		return v1beta2.ExecutorStateCompleted
+	case corev1.PodFailed:
+		return v1beta2.ExecutorStateFailed
+	default:
+		return v1beta2.ExecutorStateUnknown
+	}
+}
+
+// GetDriverContainerTerminatedState returns the terminated state of the driver container.
+func GetDriverContainerTerminatedState(pod *corev1.Pod) *corev1.ContainerStateTerminated {
+	return GetContainerTerminatedState(pod, common.SparkDriverContainerName)
+}
+
+// GetExecutorContainerTerminatedState returns the terminated state of the executor container.
+func GetExecutorContainerTerminatedState(pod *corev1.Pod) *corev1.ContainerStateTerminated {
+	state := GetContainerTerminatedState(pod, common.Spark3DefaultExecutorContainerName)
+	if state == nil {
+		state = GetContainerTerminatedState(pod, common.SparkExecutorContainerName)
+	}
+	return state
+}
+
+// GetContainerTerminatedState returns the terminated state of the container.
+func GetContainerTerminatedState(pod *corev1.Pod, container string) *corev1.ContainerStateTerminated {
+	for _, c := range pod.Status.ContainerStatuses {
+		if c.Name == container {
+			if c.State.Terminated != nil {
+				return c.State.Terminated
+			}
+			return nil
+		}
+	}
+	return nil
+}
+
+// IsDriverTerminated returns whether the driver state is a terminated state.
+func IsDriverTerminated(driverState v1beta2.DriverState) bool {
+	return driverState == v1beta2.DriverStateCompleted || driverState == v1beta2.DriverStateFailed
+}
+
+// IsExecutorTerminated returns whether the executor state is a terminated state.
+func IsExecutorTerminated(executorState v1beta2.ExecutorState) bool {
+	return executorState == v1beta2.ExecutorStateCompleted || executorState == v1beta2.ExecutorStateFailed
+}
+
+// DriverStateToApplicationState converts driver state to application state.
+func DriverStateToApplicationState(driverState v1beta2.DriverState) v1beta2.ApplicationStateType {
+	switch driverState {
+	case v1beta2.DriverStatePending:
+		return v1beta2.ApplicationStateSubmitted
+	case v1beta2.DriverStateRunning:
+		return v1beta2.ApplicationStateRunning
+	case v1beta2.DriverStateCompleted:
+		return v1beta2.ApplicationStateSucceeding
+	case v1beta2.DriverStateFailed:
+		return v1beta2.ApplicationStateFailing
+	default:
+		return v1beta2.ApplicationStateUnknown
+	}
+}
+
+// GetDriverRequestResource returns the driver request resource list.
+func GetDriverRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList {
+	minResource := corev1.ResourceList{}
+
+	// Cores corresponds to the driver's core request.
+	if app.Spec.Driver.Cores != nil {
+		if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Driver.Cores)); err == nil {
+			minResource[corev1.ResourceCPU] = value
+		}
+	}
+
+	// CoreLimit corresponds to the driver's core limit; this attribute is used only when the core request is empty.
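+	// Illustrative values (editor's note, not upstream text): with Cores unset
+	// and CoreLimit "1200m", the driver's CPU request below resolves to 1200m.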
+	if app.Spec.Driver.CoreLimit != nil {
+		if _, ok := minResource[corev1.ResourceCPU]; !ok {
+			if value, err := resource.ParseQuantity(*app.Spec.Driver.CoreLimit); err == nil {
+				minResource[corev1.ResourceCPU] = value
+			}
+		}
+	}
+
+	// Memory + MemoryOverhead correspond to the driver's memory request.
+	if app.Spec.Driver.Memory != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Driver.Memory); err == nil {
+			minResource[corev1.ResourceMemory] = value
+		}
+	}
+	if app.Spec.Driver.MemoryOverhead != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Driver.MemoryOverhead); err == nil {
+			if existing, ok := minResource[corev1.ResourceMemory]; ok {
+				existing.Add(value)
+				minResource[corev1.ResourceMemory] = existing
+			}
+		}
+	}
+
+	return minResource
+}
+
+// GetExecutorRequestResource returns the executor request resource list.
+func GetExecutorRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList {
+	minResource := corev1.ResourceList{}
+
+	// CoreRequest corresponds to the executor's core request.
+	if app.Spec.Executor.CoreRequest != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreRequest); err == nil {
+			minResource[corev1.ResourceCPU] = value
+		}
+	}
+
+	// Use the Cores attribute if CoreRequest is empty.
+	if app.Spec.Executor.Cores != nil {
+		if _, ok := minResource[corev1.ResourceCPU]; !ok {
+			if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Executor.Cores)); err == nil {
+				minResource[corev1.ResourceCPU] = value
+			}
+		}
+	}
+
+	// CoreLimit corresponds to the executor's core limit; this attribute is used only when the core request is empty.
+	if app.Spec.Executor.CoreLimit != nil {
+		if _, ok := minResource[corev1.ResourceCPU]; !ok {
+			if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreLimit); err == nil {
+				minResource[corev1.ResourceCPU] = value
+			}
+		}
+	}
+
+	// Memory + MemoryOverhead correspond to the executor's memory request.
+	if app.Spec.Executor.Memory != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Executor.Memory); err == nil {
+			minResource[corev1.ResourceMemory] = value
+		}
+	}
+	if app.Spec.Executor.MemoryOverhead != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Executor.MemoryOverhead); err == nil {
+			if existing, ok := minResource[corev1.ResourceMemory]; ok {
+				existing.Add(value)
+				minResource[corev1.ResourceMemory] = existing
+			}
+		}
+	}
+
+	resourceList := []corev1.ResourceList{{}}
+	for i := int32(0); i < *app.Spec.Executor.Instances; i++ {
+		resourceList = append(resourceList, minResource)
+	}
+	return SumResourceList(resourceList)
+}
+
+// GetInitialExecutorNumber calculates the initial number of executor pods that will be requested by the driver on startup.
+func GetInitialExecutorNumber(app *v1beta2.SparkApplication) int32 {
+	// The reference for this implementation: https://github.com/apache/spark/blob/ba208b9ca99990fa329c36b28d0aa2a5f4d0a77e/core/src/main/scala/org/apache/spark/scheduler/cluster/SchedulerBackendUtils.scala#L31
+	var initialNumExecutors int32
+
+	dynamicAllocationEnabled := app.Spec.DynamicAllocation != nil && app.Spec.DynamicAllocation.Enabled
+	if dynamicAllocationEnabled {
+		if app.Spec.Executor.Instances != nil {
+			initialNumExecutors = max(initialNumExecutors, *app.Spec.Executor.Instances)
+		}
+		if app.Spec.DynamicAllocation.InitialExecutors != nil {
+			initialNumExecutors = max(initialNumExecutors, *app.Spec.DynamicAllocation.InitialExecutors)
+		}
+		if app.Spec.DynamicAllocation.MinExecutors != nil {
+			initialNumExecutors = max(initialNumExecutors, *app.Spec.DynamicAllocation.MinExecutors)
+		}
+	} else {
+		initialNumExecutors = 2
+		if app.Spec.Executor.Instances != nil {
+			initialNumExecutors = *app.Spec.Executor.Instances
+		}
+	}
+
+	return initialNumExecutors
+}
diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/sparkpod.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/sparkpod.go
new file mode 100644
index 0000000000..fbb7650f6c
--- /dev/null
+++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/sparkpod.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	corev1 "k8s.io/api/core/v1"
+
+	"github.com/kubeflow/spark-operator/pkg/common"
+)
+
+// IsLaunchedBySparkOperator returns whether the given pod is launched by the Spark Operator.
+func IsLaunchedBySparkOperator(pod *corev1.Pod) bool {
+	return pod.Labels[common.LabelLaunchedBySparkOperator] == "true"
+}
+
+// IsDriverPod returns whether the given pod is a Spark driver Pod.
+func IsDriverPod(pod *corev1.Pod) bool {
+	return pod.Labels[common.LabelSparkRole] == common.SparkRoleDriver
+}
+
+// IsExecutorPod returns whether the given pod is a Spark executor Pod.
+func IsExecutorPod(pod *corev1.Pod) bool {
+	return pod.Labels[common.LabelSparkRole] == common.SparkRoleExecutor
+}
+
+// GetSparkExecutorID returns the Spark executor ID from the pod's labels.
+func GetSparkExecutorID(pod *corev1.Pod) string {
+	return pod.Labels[common.LabelSparkExecutorID]
+}
+
+// GetAppName returns the Spark application name from the pod's labels.
+func GetAppName(pod *corev1.Pod) string {
+	return pod.Labels[common.LabelSparkAppName]
+}
+
+// GetSparkApplicationID returns the Spark application ID from the pod's labels.
+func GetSparkApplicationID(pod *corev1.Pod) string { + return pod.Labels[common.LabelSparkApplicationSelector] +} diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/util.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/util.go new file mode 100644 index 0000000000..25f664dbca --- /dev/null +++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/util.go @@ -0,0 +1,120 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "golang.org/x/mod/semver" + "sigs.k8s.io/yaml" + + "github.com/kubeflow/spark-operator/pkg/common" +) + +func GetMasterURL() (string, error) { + kubernetesServiceHost := os.Getenv(common.EnvKubernetesServiceHost) + if kubernetesServiceHost == "" { + return "", fmt.Errorf("environment variable %s is not found", common.EnvKubernetesServiceHost) + } + + kubernetesServicePort := os.Getenv(common.EnvKubernetesServicePort) + if kubernetesServicePort == "" { + return "", fmt.Errorf("environment variable %s is not found", common.EnvKubernetesServicePort) + } + // check if the host is IPv6 address + if strings.Contains(kubernetesServiceHost, ":") && !strings.HasPrefix(kubernetesServiceHost, "[") { + return fmt.Sprintf("k8s://https://[%s]:%s", kubernetesServiceHost, kubernetesServicePort), nil + } + return fmt.Sprintf("k8s://https://%s:%s", kubernetesServiceHost, kubernetesServicePort), nil +} + +// Helper functions to check and remove a string from a slice of strings. +// ContainsString checks if a given string is present in a slice +func ContainsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false +} + +// RemoveString removes a given string from a slice, if present +func RemoveString(slice []string, s string) (result []string) { + for _, item := range slice { + if item != s { + result = append(result, item) + } + } + return result +} + +func BoolPtr(b bool) *bool { + return &b +} + +func Int32Ptr(n int32) *int32 { + return &n +} + +func Int64Ptr(n int64) *int64 { + return &n +} + +func StringPtr(s string) *string { + return &s +} + +// CompareSemanticVersion compares two semantic versions. +func CompareSemanticVersion(v1, v2 string) int { + // Add 'v' prefix if needed + addPrefix := func(s string) string { + if !strings.HasPrefix(s, "v") { + return "v" + s + } + return s + } + return semver.Compare(addPrefix(v1), addPrefix(v2)) +} + +// WriteObjectToFile marshals the given object into a YAML document and writes it to the given file. 
+func WriteObjectToFile(obj interface{}, filePath string) error {
+	if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil {
+		return err
+	}
+
+	file, err := os.Create(filePath)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	data, err := yaml.Marshal(obj)
+	if err != nil {
+		return err
+	}
+
+	_, err = file.Write(data)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/kubeflow/spark-operator/pkg/util/workqueue.go b/vendor/github.com/kubeflow/spark-operator/pkg/util/workqueue.go
new file mode 100644
index 0000000000..8f8d18b568
--- /dev/null
+++ b/vendor/github.com/kubeflow/spark-operator/pkg/util/workqueue.go
@@ -0,0 +1,19 @@
+package util
+
+import (
+	"time"
+
+	"golang.org/x/time/rate"
+	"k8s.io/client-go/util/workqueue"
+)
+
+// NewRateLimiter creates a new rate limiter while allowing the BucketRateLimiter parameters to be tuned.
+// It also works around a "bug" in the BucketRateLimiter, which does not have a maxDelay parameter.
+func NewRateLimiter(qps int, bucketSize int, maxDelay time.Duration) workqueue.RateLimiter {
+	ratelimiter := workqueue.NewWithMaxWaitRateLimiter(
+		workqueue.NewMaxOfRateLimiter(
+			workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
+			&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), bucketSize)},
+		), maxDelay)
+	return ratelimiter
+}
diff --git a/vendor/github.com/project-codeflare/appwrapper/pkg/utils/utils.go b/vendor/github.com/project-codeflare/appwrapper/pkg/utils/utils.go
index 33e853381f..b138826cf9 100644
--- a/vendor/github.com/project-codeflare/appwrapper/pkg/utils/utils.go
+++ b/vendor/github.com/project-codeflare/appwrapper/pkg/utils/utils.go
@@ -355,10 +355,6 @@ func GetPodSets(aw *workloadv1beta2.AppWrapper) ([]kueue.PodSet, error) {
 			}
 		}
 	}
-
-	if len(podSets) == 0 {
-		return nil, fmt.Errorf("no PodSets found for AppWrapper %s", aw.Name)
-	}
 	return podSets, nil
 }
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
index ab091cf6ad..18436eaedf 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -42,6 +42,8 @@ type config struct {
 	TracerProvider trace.TracerProvider
 	MeterProvider  metric.MeterProvider
 	SpanStartOptions []trace.SpanStartOption
+	SpanAttributes   []attribute.KeyValue
+	MetricAttributes []attribute.KeyValue
 
 	ReceivedEvent bool
 	SentEvent     bool
@@ -257,3 +259,29 @@ func (o spanStartOption) apply(c *config) {
 func WithSpanOptions(opts ...trace.SpanStartOption) Option {
 	return spanStartOption{opts}
 }
+
+type spanAttributesOption struct{ a []attribute.KeyValue }
+
+func (o spanAttributesOption) apply(c *config) {
+	if o.a != nil {
+		c.SpanAttributes = o.a
+	}
+}
+
+// WithSpanAttributes returns an Option to add custom attributes to the spans.
+func WithSpanAttributes(a ...attribute.KeyValue) Option {
+	return spanAttributesOption{a: a}
+}
+
+type metricAttributesOption struct{ a []attribute.KeyValue }
+
+func (o metricAttributesOption) apply(c *config) {
+	if o.a != nil {
+		c.MetricAttributes = o.a
+	}
+}
+
+// WithMetricAttributes returns an Option to add custom attributes to the metrics.
+func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 201867a869..fbcbfb84e0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -62,11 +62,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -102,11 +102,11 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index a15d06cb0c..04f425edfe 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.53.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 214acaf581..5d6e6156b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Client HTTP metrics. 
const ( clientRequestSize = "http.client.request.size" // Outgoing request bytes total diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index f0a9bb9efe..a01bfafbe0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -33,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -194,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. +func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index d01bdccf40..33580a35b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -9,11 +9,9 @@ import ( "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -24,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -34,10 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - traceSemconv semconv.HTTPServer - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, - - traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) 
h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -88,6 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) } func handleErr(err error) { @@ -96,30 +87,6 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -134,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } opts = append(opts, h.spanStartOptions...) @@ -166,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -183,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,35 +176,35 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http next.ServeHTTP(w, r.WithContext(ctx)) - span.SetStatus(semconv.ServerStatus(rww.statusCode)) - span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ - StatusCode: rww.statusCode, - ReadBytes: bw.read.Load(), - ReadError: bw.err, - WriteBytes: rww.written, - WriteError: rww.err, + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), })...) - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) - h.responseBytesCounter.Add(ctx, rww.written, addOpts...) - // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
 func WithRouteTag(route string, h http.Handler) http.Handler {
-	attr := semconv.NewHTTPServer().Route(route)
+	attr := semconv.NewHTTPServer(nil).Route(route)
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		span := trace.SpanFromContext(r.Context())
 		span.SetAttributes(attr)
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
new file mode 100644
index 0000000000..a945f55661
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+	"io"
+	"sync"
+)
+
+var _ io.ReadCloser = &BodyWrapper{}
+
+// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type BodyWrapper struct {
+	io.ReadCloser
+	OnRead func(n int64) // must not be nil
+
+	mu   sync.Mutex
+	read int64
+	err  error
+}
+
+// NewBodyWrapper creates a new BodyWrapper.
+//
+// The onRead attribute is a callback that will be called every time the data
+// is read, with the number of bytes being read.
+func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
+	return &BodyWrapper{
+		ReadCloser: body,
+		OnRead:     onRead,
+	}
+}
+
+// Read reads the data from the io.ReadCloser, and stores the number of bytes
+// read and the error.
+func (w *BodyWrapper) Read(b []byte) (int, error) {
+	n, err := w.ReadCloser.Read(b)
+	n1 := int64(n)
+
+	w.updateReadData(n1, err)
+	w.OnRead(n1)
+	return n, err
+}
+
+func (w *BodyWrapper) updateReadData(n int64, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	w.read += n
+	if err != nil {
+		w.err = err
+	}
+}
+
+// Close closes the io.ReadCloser.
+func (w *BodyWrapper) Close() error {
+	return w.ReadCloser.Close()
+}
+
+// BytesRead returns the number of bytes read up to this point.
+func (w *BodyWrapper) BytesRead() int64 {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	return w.read
+}
+
+// Error returns the last error.
+func (w *BodyWrapper) Error() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	return w.err
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
new file mode 100644
index 0000000000..aea171fb26
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+	"net/http"
+	"sync"
+)
+
+var _ http.ResponseWriter = &RespWriterWrapper{}
+
+// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
+// that may be useful when using it in real life situations.
+type RespWriterWrapper struct {
+	http.ResponseWriter
+	OnWrite func(n int64) // must not be nil
+
+	mu          sync.RWMutex
+	written     int64
+	statusCode  int
+	err         error
+	wroteHeader bool
+}
+
+// NewRespWriterWrapper creates a new RespWriterWrapper.
+//
+// The onWrite attribute is a callback that will be called every time the data
+// is written, with the number of bytes that were written.
+func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
+	return &RespWriterWrapper{
+		ResponseWriter: w,
+		OnWrite:        onWrite,
+		statusCode:     http.StatusOK, // default status code in case the Handler doesn't write anything
+	}
+}
+
+// Write writes the byte slice into the [ResponseWriter], and tracks the
+// number of bytes written and last error.
+func (w *RespWriterWrapper) Write(p []byte) (int, error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	w.writeHeader(http.StatusOK)
+
+	n, err := w.ResponseWriter.Write(p)
+	n1 := int64(n)
+	w.OnWrite(n1)
+	w.written += n1
+	w.err = err
+	return n, err
+}
+
+// WriteHeader persists initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Blocking consecutive calls to WriteHeader alters expected behavior and will
+// remove warning logs from net/http where developers will notice incorrect handler implementations.
+func (w *RespWriterWrapper) WriteHeader(statusCode int) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	w.writeHeader(statusCode)
+}
+
+// writeHeader persists the status code for span attribution, and propagates
+// the call to the underlying ResponseWriter.
+// It does not acquire a lock, and therefore assumes that is being handled by a
+// parent method.
+func (w *RespWriterWrapper) writeHeader(statusCode int) {
+	if !w.wroteHeader {
+		w.wroteHeader = true
+		w.statusCode = statusCode
+	}
+	w.ResponseWriter.WriteHeader(statusCode)
+}
+
+// Flush implements [http.Flusher].
+func (w *RespWriterWrapper) Flush() {
+	w.WriteHeader(http.StatusOK)
+
+	if f, ok := w.ResponseWriter.(http.Flusher); ok {
+		f.Flush()
+	}
+}
+
+// BytesWritten returns the number of bytes written.
+func (w *RespWriterWrapper) BytesWritten() int64 {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+
+	return w.written
+}
+
+// StatusCode returns the HTTP status code that was sent.
+func (w *RespWriterWrapper) StatusCode() int {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+
+	return w.statusCode
+}
+
+// Error returns the last error.
+func (w *RespWriterWrapper) Error() error {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+
+	return w.err
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index 3ec0ad00c8..9cae4cab86 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -4,6 +4,7 @@
 package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"os"
@@ -11,6 +12,7 @@ import (
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/metric"
 )
 
 type ResponseTelemetry struct {
@@ -23,6 +25,11 @@ type ResponseTelemetry struct {
 
 type HTTPServer struct {
 	duplicate bool
+
+	// Old metrics
+	requestBytesCounter  metric.Int64Counter
+	responseBytesCounter metric.Int64Counter
+	serverLatencyMeasure metric.Float64Histogram
 }
 
 // RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue {
 	return oldHTTPServer{}.Route(route)
 }
 
-func NewHTTPServer() HTTPServer {
-	env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
-	return HTTPServer{duplicate: env == "http/dup"}
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
+// Status returns a span status code and message for an HTTP status code
 // value returned by a server. Status codes in the 400-499 range are not
 // returned as errors.
-func ServerStatus(code int) (codes.Code, string) {
+func (s HTTPServer) Status(code int) (codes.Code, string) {
 	if code < 100 || code >= 600 {
 		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
 	}
@@ -80,3 +82,84 @@
 	}
 	return codes.Unset, ""
 }
+
+type MetricData struct {
+	ServerName string
+	Req        *http.Request
+	StatusCode int
+	AdditionalAttributes []attribute.KeyValue
+
+	RequestSize  int64
+	ResponseSize int64
+	ElapsedTime  float64
+}
+
+func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
+	if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
+		// This will happen if an HTTPServer{} is used instead of NewHTTPServer.
+		return
+	}
+
+	attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+	o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+	addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+	s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
+	s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go similarity index 57% rename from vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 0c5d4c4608..745b8c67bc 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -4,11 +4,14 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "fmt" "net/http" + "reflect" + "strconv" "strings" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) type newHTTPServer struct{} @@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke func (n newHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. +func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. 
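// Editor's aside — illustrative only, not part of the vendored change: the duplicate
// flag set by NewHTTPServer and NewHTTPClient above keys off an environment variable,
// so emitting both the old (v1.20.0) and new (v1.26.0) HTTP semantic conventions is an
// opt-in made before the instrumented handler or transport is constructed:
//
//	os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")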
+ + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e7f293761b..e6e14924f5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -9,8 +9,9 @@ import ( "strconv" "strings" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Byte size checked 16 above. } func requiredHTTPPort(https bool, port int) int { // nolint:revive @@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{ http.MethodPut: semconvNew.HTTPRequestMethodPut, http.MethodTrace: semconvNew.HTTPRequestMethodTrace, } + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c3e838aaa5..c999b05e67 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -7,9 +7,13 @@ import ( "errors" "io" "net/http" + "slices" + "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) @@ -72,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue { func HTTPStatusCode(status int) attribute.KeyValue { return semconv.HTTPStatusCode(status) } + +// Server HTTP metrics. 
+const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index a9a9226b39..b80a1db61f 100644 --- 
a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. } func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 0d3cb2e4aa..b4119d3438 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,13 +11,15 @@ import ( "sync/atomic" "time" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) @@ -26,14 +28,16 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + semconv semconv.HTTPClient requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram @@ -53,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, + rt: base, + semconv: semconv.NewHTTPClient(), } defaultOpts := []Option{ @@ -76,6 +81,7 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn } func (t *Transport) createMeasures() { @@ -143,45 +149,49 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) 
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, addOpts...) + t.responseBytesCounter.Add(ctx, n, o) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) @@ -193,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index b0957f28ce..502c1bdafc 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.53.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 948f8406c0..0000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. -func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} - -func (w *respWriterWrapper) Flush() { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - - if f, ok := w.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b6495..d9abe194d9 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998e0..6107c17b89 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,64 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. 
(#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +107,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +234,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -3003,7 +3062,8 @@ It contains api and sdk for trace and meter. 
- CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3146,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 2025549332..5904bb7070 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,7 +5,7 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f58e..b7402576f9 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -650,7 +650,7 @@ should be canceled. ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f76a..070b1e57df 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -178,17 +178,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. 
 .PHONY: golangci-lint golangci-lint-fix
 golangci-lint-fix: ARGS=--fix
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 5a89093173..657df34710 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -47,20 +47,29 @@ stop ensuring compatibility with these versions in the following manner:
 
 Currently, this project supports the following environments.
 
-| OS | Go Version | Architecture |
-|---------|------------|--------------|
-| Ubuntu | 1.22 | amd64 |
-| Ubuntu | 1.21 | amd64 |
-| Ubuntu | 1.22 | 386 |
-| Ubuntu | 1.21 | 386 |
-| Linux | 1.22 | arm64 |
-| Linux | 1.21 | arm64 |
-| MacOS | 1.22 | amd64 |
-| MacOS | 1.21 | amd64 |
-| Windows | 1.22 | amd64 |
-| Windows | 1.21 | amd64 |
-| Windows | 1.22 | 386 |
-| Windows | 1.21 | 386 |
+| OS | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.22 | amd64 |
+| Ubuntu | 1.21 | amd64 |
+| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.22 | 386 |
+| Ubuntu | 1.21 | 386 |
+| Linux | 1.23 | arm64 |
+| Linux | 1.22 | arm64 |
+| Linux | 1.21 | arm64 |
+| macOS 13 | 1.23 | amd64 |
+| macOS 13 | 1.22 | amd64 |
+| macOS 13 | 1.21 | amd64 |
+| macOS | 1.23 | arm64 |
+| macOS | 1.22 | arm64 |
+| macOS | 1.21 | arm64 |
+| Windows | 1.23 | amd64 |
+| Windows | 1.22 | amd64 |
+| Windows | 1.21 | amd64 |
+| Windows | 1.23 | 386 |
+| Windows | 1.22 | 386 |
+| Windows | 1.21 | 386 |
 
 While this project should work for other systems, no compatibility guarantees are made for those systems currently.
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 940f57f3d8..59992984d4 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
 ```
 - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `).
+  - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future.
 - Update all the appropriate links at the bottom.
 
 4. Push the changes to upstream and create a Pull Request on GitHub.
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index c40c896cc6..b3569e95e5 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -44,9 +44,15 @@ type Property struct {
 
 // NewKeyProperty returns a new Property for key.
 //
+// The passed key must be a valid, non-empty UTF-8 string.
 // If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as Property keys.
 func NewKeyProperty(key string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
 
@@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) {
 // Notice: Consider using [NewKeyValuePropertyRaw] instead
 // that does not require percent-encoding of the value.
 func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
 
 // NewKeyValuePropertyRaw returns a new Property for key with value.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as Property keys.
 func NewKeyValuePropertyRaw(key, value string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
+	if !validateBaggageValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
 
 	p := Property{
 		key: key,
@@ -115,12 +134,15 @@ func (p Property) validate() error {
 		return fmt.Errorf("invalid property: %w", err)
 	}
 
-	if !validateKey(p.key) {
+	if !validateBaggageName(p.key) {
 		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
 	}
 	if !p.hasValue && p.value != "" {
 		return errFunc(errors.New("inconsistent value"))
 	}
+	if p.hasValue && !validateBaggageValue(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
 	return nil
 }
 
@@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) {
 
 // String encodes Property into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key is invalid under the W3C Baggage
+// specification. This could happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (p Property) String() string {
+	// W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(p.key) {
+		return ""
+	}
+
 	if p.hasValue {
 		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
 	}
@@ -203,9 +233,14 @@ func (p properties) validate() error {
 // String encodes properties into a header string compliant with the W3C Baggage
 // specification.
 func (p properties) String() string {
-	props := make([]string, len(p))
-	for i, prop := range p {
-		props[i] = prop.String()
+	props := make([]string, 0, len(p))
+	for _, prop := range p {
+		s := prop.String()
+
+		// Ignore empty properties.
+		if s != "" {
+			props = append(props, s)
+		}
 	}
 	return strings.Join(props, propertyDelimiter)
 }
@@ -230,6 +265,10 @@ type Member struct {
 // Notice: Consider using [NewMemberRaw] instead
 // that does not require percent-encoding of the value.
 func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
 
 // NewMemberRaw returns a new Member from the passed arguments.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on baggage key.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric values are strongly recommended to be used as baggage keys.
 func NewMemberRaw(key, value string, props ...Property) (Member, error) {
 	m := Member{
 		key: key,
@@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
 
-	val := strings.TrimSpace(v)
-	if !validateValue(val) {
+	rawVal := strings.TrimSpace(v)
+	if !validateValue(rawVal) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
 	}
 
 	// Decode a percent-encoded value.
-	value, err := url.PathUnescape(val)
+	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
 		return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
 	}
+
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
 	return Member{key: key, value: value, properties: props, hasData: true}, nil
 }
 
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
+	if utf8.ValidString(unescapeVal) {
+		return unescapeVal
+	}
+	// W3C baggage spec:
+	// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+	var b strings.Builder
+	b.Grow(cap)
+	for i := 0; i < len(unescapeVal); {
+		r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+		if r == utf8.RuneError && size == 1 {
+			// Invalid UTF-8 sequence found, replace it with '�'
+			_, _ = b.WriteString("�")
+		} else {
+			_, _ = b.WriteRune(r)
+		}
+		i += size
+	}
+
+	return b.String()
+}
+
 // validate ensures m conforms to the W3C Baggage specification.
 // A key must be an ASCII string, returning an error otherwise.
 func (m Member) validate() error {
@@ -314,9 +385,12 @@ func (m Member) validate() error {
 		return fmt.Errorf("%w: %q", errInvalidMember, m)
 	}
 
-	if !validateKey(m.key) {
+	if !validateBaggageName(m.key) {
 		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
 	}
+	if !validateBaggageValue(m.value) {
+		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+	}
 	return m.properties.validate()
 }
 
@@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
 
 // String encodes Member into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key is invalid under the W3C Baggage
+// specification. This could happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (m Member) String() string {
-	// A key is just an ASCII string. A value is restricted to be
-	// US-ASCII characters excluding CTLs, whitespace,
-	// DQUOTE, comma, semicolon, and backslash.
+	// W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(m.key) {
+		return ""
+	}
+
 	s := m.key + keyValueDelimiter + valueEscape(m.value)
 	if len(m.properties) > 0 {
 		s += propertyDelimiter + m.properties.String()
@@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member {
 }
 
 // Members returns all the baggage list-members.
-// The order of the returned list-members does not have significance.
+// The order of the returned list-members is not significant.
 //
 // The returned members are not validated, as we assume the validation happened
 // when they were added to the Baggage.
@@ -469,8 +548,8 @@ func (b Baggage) Members() []Member {
 	return members
 }
 
-// SetMember returns a copy the Baggage with the member included. If the
-// baggage contains a Member with the same key the existing Member is
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
 // replaced.
 //
 // If member is invalid according to the W3C Baggage specification, an error
@@ -528,14 +607,22 @@ func (b Baggage) Len() int {
 
 // String encodes Baggage into a header string compliant with the W3C Baggage
 // specification.
+// It ignores members whose keys are invalid under the W3C Baggage
+// specification. This could happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (b Baggage) String() string {
 	members := make([]string, 0, len(b.list))
 	for k, v := range b.list {
-		members = append(members, Member{
+		s := Member{
 			key:        k,
 			value:      v.Value,
 			properties: fromInternalProperties(v.Properties),
-		}.String())
+		}.String()
+
+		// Ignore empty members.
+		if s != "" {
+			members = append(members, s)
+		}
 	}
 	return strings.Join(members, listDelimiter)
 }
@@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
 	}
 
 	// Decode a percent-encoded value.
-	value, err := url.PathUnescape(s[valueStart:valueEnd])
+	rawVal := s[valueStart:valueEnd]
+	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
 		return
 	}
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
 
 	ok = true
 	p.key = s[keyStart:keyEnd]
@@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
 	'~': true,
 }
 
+// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
+// Baggage name is a valid, non-empty UTF-8 string.
+func validateBaggageName(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	return utf8.ValidString(s)
+}
+
+// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
+// Baggage value is a valid UTF-8 string.
+// Empty string is also a valid UTF-8 string.
+func validateBaggageValue(s string) bool {
+	return utf8.ValidString(s)
+}
+
+// validateKey checks if the string is a valid W3C Baggage key.
 func validateKey(s string) bool {
 	if len(s) == 0 {
 		return false
@@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool {
 	return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
 }
 
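// Editor's aside — illustrative only, not part of the vendored change: taken together,
// the baggage changes above mean any non-empty UTF-8 key now produces a valid Member,
// while the W3C header encoding silently drops members whose keys are not valid tokens.
// A sketch (names and values are made up):
//
//	m1, _ := baggage.NewMemberRaw("userName", "Alice") // token key: encoded
//	m2, _ := baggage.NewMemberRaw("用户", "v")          // UTF-8 key: a valid member, but dropped by String
//	b, _ := baggage.New(m1, m2)
//	fmt.Println(b.String()) // prints just "userName=Alice"
+// validateValue checks if the string is a valid W3C Baggage value.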
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index df29d96a6d..2acbac3546 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
 			return fmt.Errorf("invalid code: %q", ci)
 		}
 
-		*c = Code(ci)
+		*c = Code(ci) // nolint: gosec // Bit size of 32 checked above.
 		return nil
 	}
 	return fmt.Errorf("invalid code: %q", string(b))
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index 441c595014..921f85961a 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
 
 To read more about metrics, see go.opentelemetry.io/otel/metric.
 
+To read more about logs, see go.opentelemetry.io/otel/log.
+
 To read more about propagation, see go.opentelemetry.io/otel/propagation and
 go.opentelemetry.io/otel/baggage.
 */
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index 3e7bb3b356..9b1da2c02b 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -24,7 +24,8 @@ func Int64ToRaw(i int64) uint64 {
 }
 
 func RawToInt64(r uint64) int64 {
-	return int64(r)
+	// Assumes original was a valid int64 (overflow not checked).
+	return int64(r) // nolint: gosec
 }
 
 func Float64ToRaw(f float64) uint64 {
@@ -36,9 +37,11 @@ func RawToFloat64(r uint64) float64 {
 }
 
 func RawPtrToFloat64Ptr(r *uint64) *float64 {
-	return (*float64)(unsafe.Pointer(r))
+	// Assumes original was a valid *float64 (overflow not checked).
+	return (*float64)(unsafe.Pointer(r)) // nolint: gosec
 }
 
 func RawPtrToInt64Ptr(r *uint64) *int64 {
-	return (*int64)(unsafe.Pointer(r))
+	// Assumes original was a valid *int64 (overflow not checked).
+	return (*int64)(unsafe.Pointer(r)) // nolint: gosec
 }
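The comments added to rawhelpers.go document an invariant the linter cannot see: each uint64 is assumed to hold a bit pattern produced by the matching *ToRaw conversion, so the reverse conversion is lossless. A standalone sketch of the same technique, using only the standard library rather than the internal helpers:

package main

import (
	"fmt"
	"math"
)

func main() {
	// A float64 stored as its raw IEEE-754 bit pattern round-trips
	// exactly, which is what RawToFloat64 relies on.
	f := 3.14
	raw := math.Float64bits(f)
	fmt.Println(math.Float64frombits(raw) == f) // true

	// The int64 round trip through uint64 is likewise lossless under
	// two's complement, which is why the gosec finding is suppressed.
	i := int64(-42)
	fmt.Println(int64(uint64(i)) == i) // true
}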
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 6a7991e015..14e08c24a4 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -52,6 +52,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
 	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record int64 measurements during a computational
@@ -61,6 +62,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
 	// Int64Histogram returns a new Int64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of int64 measurements during a
@@ -70,6 +72,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
 	// Int64Gauge returns a new Int64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous int64 measurements during a computational operation.
@@ -78,6 +81,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
 	// Int64ObservableCounter returns a new Int64ObservableCounter identified
 	// by name and configured with options. The instrument is used to
 	// asynchronously record increasing int64 measurements once per a
@@ -92,6 +96,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
 	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record int64 measurements once per
@@ -106,6 +111,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
 	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous int64 measurements once per a
@@ -130,6 +136,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
 	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record float64 measurements during a computational
@@ -139,6 +146,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
 	// Float64Histogram returns a new Float64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of float64 measurements during a
@@ -148,6 +156,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
 	// Float64Gauge returns a new Float64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous float64 measurements during a computational operation.
@@ -156,6 +165,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
 	// Float64ObservableCounter returns a new Float64ObservableCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record increasing float64
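For readers skimming the reformatted Meter interface (which continues in the hunks below), a minimal usage sketch; the meter name, instrument name, and description are illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// Obtain a Meter from the global provider and create a synchronous
	// instrument once; reuse it on the hot path.
	meter := otel.Meter("example.com/demo")
	counter, err := meter.Int64Counter("requests.total",
		metric.WithDescription("Total processed requests."))
	if err != nil {
		panic(err)
	}
	counter.Add(context.Background(), 1)
}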
@@ -170,6 +180,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
 	// Float64ObservableUpDownCounter returns a new
 	// Float64ObservableUpDownCounter instrument identified by name and
 	// configured with options. The instrument is used to asynchronously record
@@ -184,6 +195,7 @@
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
 	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous float64 measurements once per a
@@ -242,6 +254,7 @@ type Observer interface {
 
 	// ObserveFloat64 records the float64 value for obsrv.
 	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
 	// ObserveInt64 records the int64 value for obsrv.
 	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
 }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
index f4d1857c4f..f2cdf3c651 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -4,5 +4,6 @@
 package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
 
 // Library represents the instrumentation library.
-// Deprecated: please use Scope instead.
+//
+// Deprecated: use [Scope] instead.
 type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
index 32f862790c..d511d0f271 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope {
 
 // InstrumentationLibrary returns information about the instrumentation
 // library that created the span.
-func (s snapshot) InstrumentationLibrary() instrumentation.Library {
+func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
 	return s.instrumentationScope
 }
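With Library now a deprecated alias of Scope, code that consumes ReadOnlySpan (whose diff follows) should prefer the non-deprecated accessor. A hedged sketch of the migration; describeSpan is a hypothetical helper, not part of this change:

package demo

import (
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// describeSpan reads the instrumentation scope directly; the deprecated
// InstrumentationLibrary() accessor returns the same value.
func describeSpan(s sdktrace.ReadOnlySpan) string {
	scope := s.InstrumentationScope()
	return fmt.Sprintf("%s@%s", scope.Name, scope.Version)
}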
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index ac90f1a260..4945f50830 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -62,7 +62,7 @@ type ReadOnlySpan interface {
 	// InstrumentationLibrary returns information about the instrumentation
 	// library that created the span.
 	// Deprecated: please use InstrumentationScope instead.
-	InstrumentationLibrary() instrumentation.Library
+	InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility
 	// Resource returns information about the entity that produced the span.
 	Resource() *resource.Resource
 	// DroppedAttributes returns the number of attributes dropped by the span
@@ -642,7 +642,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
 
 // InstrumentationLibrary returns the instrumentation.Library associated with
 // the Tracer that created this span.
-func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	return s.tracer.instrumentationScope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 33d065a7cb..b7cede891c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
 
 // Version is the current release version of the OpenTelemetry SDK in use.
 func Version() string {
-	return "1.28.0"
+	return "1.29.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
index ada857995d..d5197e16ce 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
@@ -115,7 +115,7 @@ func hostIPNamePort(hostWithPort string) (ip string, name string, port int) {
 		name = hostPart
 	}
 	if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil {
-		port = int(parsedPort)
+		port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above.
 	}
 	return
 }
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
deleted file mode 100644
index 0b6cbe960c..0000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.24.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
deleted file mode 100644
index 6e688345cb..0000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
+++ /dev/null
@@ -1,4387 +0,0 @@
-// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes FaaS attributes. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function.
- // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. 
- EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Describes Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// A file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. 
It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes Database attributes -const ( - // PoolNameKey is the attribute Key conforming to the "pool.name" semantic - // conventions. It represents the name of the connection pool; unique - // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, then the - // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) - // should be used - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myDataSource' - PoolNameKey = attribute.Key("pool.name") - - // StateKey is the attribute Key conforming to the "state" semantic - // conventions. It represents the state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'idle' - StateKey = attribute.Key("state") -) - -var ( - // idle - StateIdle = StateKey.String("idle") - // used - StateUsed = StateKey.String("used") -) - -// PoolName returns an attribute KeyValue conforming to the "pool.name" -// semantic conventions. It represents the name of the connection pool; unique -// within the instrumented application. In case the connection pool -// implementation doesn't provide a name, then the -// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) -// should be used -func PoolName(val string) attribute.KeyValue { - return PoolNameKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: experimental - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if the matched endpoint for the - // request had a rate-limiting policy.) 
- // Stability: experimental - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (if and only if the request was - // not handled.) - // Stability: experimental - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If and only if a route was - // successfully matched.) - // Stability: experimental - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. 
It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// Describes JVM buffer metric attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// Describes JVM memory metric attributes. -const ( - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// Describes System metric attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU metric attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory metric attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. 
It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging metric attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem metric attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. 
It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network metric attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process metric attributes -const ( - // SystemProcessesStatusKey is the attribute Key conforming to the - // "system.processes.status" semantic conventions. 
It represents the - // process state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") -) - -var ( - // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") - // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") - // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") - // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") -) - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// The attributes used to describe telemetry in the context of databases. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary Cassandra table that the operation is acting upon, including the - // keyspace name (if applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") - - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") - - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") - - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") - - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBSystemKey = attribute.Key("db.system") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBCassandraCoordinatorDC returns an 
attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
-	return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
-	return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents whether or
-// not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
-	return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
-	return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
-	return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary Cassandra table that the operation is acting upon, including the
-// keyspace name (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
-	return DBCassandraTableKey.String(val)
-}
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
-	return DBConnectionStringKey.String(val)
-}
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
-	return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the Cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
-	return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the RUs
-// consumed for that operation.
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
-	return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
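// Illustrative usage sketch, not part of the vendored file: attaching the DB
// attributes above to a span. Assumes this package is imported as `semconv`
// and `span` is an active go.opentelemetry.io/otel/trace Span; the literal
// values are placeholders.
//
//	span.SetAttributes(
//		semconv.DBSystemCassandra,
//		semconv.DBName("customers"),
//		semconv.DBCassandraTable("mykeyspace.mytable"),
//		semconv.DBCassandraIdempotence(true),
//	)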
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes.
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
-	return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
-	return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
-// Cosmos DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
-	return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
-	return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a request was
-// routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
-	return DBElasticsearchNodeNameKey.String(val)
-}
-
-// DBInstanceID returns an attribute KeyValue conforming to the
-// "db.instance.id" semantic conventions. It represents an identifier (address,
-// unique name, or any other identifier) of the database instance that is
-// executing queries or mutations on the current connection. This is useful in
-// cases where the database is running in a clustered environment and the
-// instrumentation is able to record the node executing the query. The client
-// may obtain this value in databases like MySQL using queries like `select
-// @@hostname`.
-func DBInstanceID(val string) attribute.KeyValue {
-	return DBInstanceIDKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
-	return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the MongoDB
-// collection being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
-	return DBMongoDBCollectionKey.String(val)
-}
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// connecting to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
-	return DBMSSQLInstanceNameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the name of the database being accessed.
For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// Describes deprecated HTTP attributes. -const ( - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'GET', 'POST', 'HEAD' - // Deprecated: use `http.request.method` instead. - HTTPMethodKey = attribute.Key("http.method") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.request.header.content-length` instead. - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.response.header.content-length` instead. - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'http', 'https' - // Deprecated: use `url.scheme` instead. 
- HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 200 - // Deprecated: use `http.response.status_code` instead. - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.path` and `url.query` instead. - HTTPTargetKey = attribute.Key("http.target") - - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.full` instead. - HTTPURLKey = attribute.Key("http.url") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - // Deprecated: use `user_agent.original` instead. - HTTPUserAgentKey = attribute.Key("http.user_agent") -) - -var ( - // HTTP/1.0 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. -// -// Deprecated: use `http.request.method` instead. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. -// -// Deprecated: use `http.request.header.content-length` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. -// -// Deprecated: use `http.response.header.content-length` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. -// -// Deprecated: use `url.scheme` instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. 
-// -// Deprecated: use `http.response.status_code` instead. -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. -// -// Deprecated: use `url.path` and `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. -// -// Deprecated: use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. -// -// Deprecated: use `user_agent.original` instead. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address`. - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port`. - NetHostPortKey = attribute.Key("net.host.port") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address` on client spans and `client.address` on - // server spans. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port` on client spans and `client.port` on - // server spans. - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'amqp', 'http', 'mqtt' - // Deprecated: use `network.protocol.name`. - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '3.1.1' - // Deprecated: use `network.protocol.version`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: use `network.local.address`. 
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `network.local.port`. - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '192.168.0.1' - // Deprecated: use `network.peer.address`. - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: no replacement at this time. - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 65531 - // Deprecated: use `network.peer.port`. - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport`. - NetTransportKey = attribute.Key("net.transport") -) - -var ( - // IPv4 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // ip_tcp - // - // Deprecated: use `network.transport`. - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - // - // Deprecated: use `network.transport`. - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe - // - // Deprecated: use `network.transport`. - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - // - // Deprecated: use `network.transport`. - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - // - // Deprecated: use `network.transport`. - NetTransportOther = NetTransportKey.String("other") -) - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. -// -// Deprecated: use `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. -// -// Deprecated: use `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. -// -// Deprecated: use `server.address` on client spans and `client.address` on -// server spans. 
-func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. -// -// Deprecated: use `server.port` on client spans and `client.port` on server -// spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. -// -// Deprecated: use `network.protocol.name`. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. -// -// Deprecated: use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. -// -// Deprecated: use `network.local.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. -// -// Deprecated: use `network.local.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. -// -// Deprecated: use `network.peer.address`. -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. -// -// Deprecated: no replacement at this time. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. -// -// Deprecated: use `network.peer.port`. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. 
-	DestinationAddressKey = attribute.Key("destination.address")
-
-	// DestinationPortKey is the attribute Key conforming to the
-	// "destination.port" semantic conventions. It represents the destination
-	// port number.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3389, 2888
-	DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
-	return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number.
-func DestinationPort(val int) attribute.KeyValue {
-	return DestinationPortKey.Int(val)
-}
-
-// These attributes may be used for any disk related operation.
-const (
-	// DiskIoDirectionKey is the attribute Key conforming to the
-	// "disk.io.direction" semantic conventions. It represents the disk IO
-	// operation direction.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'read'
-	DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
-	// read
-	DiskIoDirectionRead = DiskIoDirectionKey.String("read")
-	// write
-	DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report an error.
-const (
-	// ErrorTypeKey is the attribute Key conforming to the "error.type"
-	// semantic conventions. It describes a class of error the operation
-	// ended with.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'timeout', 'java.net.UnknownHostException',
-	// 'server_certificate_invalid', '500'
-	// Note: The `error.type` SHOULD be predictable and SHOULD have low
-	// cardinality.
-	// Instrumentations SHOULD document the list of errors they report.
-	//
-	// The cardinality of `error.type` within one instrumentation library
-	// SHOULD be low.
-	// Telemetry consumers that aggregate data from multiple instrumentation
-	// libraries and applications
-	// should be prepared for `error.type` to have high cardinality at query
-	// time when no
-	// additional filters are applied.
-	//
-	// If the operation has completed successfully, instrumentations SHOULD NOT
-	// set `error.type`.
-	//
-	// If a specific domain defines its own set of error identifiers (such as
-	// HTTP or gRPC status codes),
-	// it's RECOMMENDED to:
-	//
-	// * Use a domain-specific attribute
-	// * Set `error.type` to capture all errors, regardless of whether they are
-	// defined within the domain-specific set or not.
-	ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
-	// A fallback error value to be used when the instrumentation doesn't define a custom value
-	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
-	// ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It SHOULD be set to true if
-	// the exception event is recorded at a point where it is known that the
-	// exception is escaping the scope of the span.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: An exception is considered to have escaped (or left) the scope of
-	// a span,
-	// if that span is ended while the exception is still logically "in
-	// flight".
-	// This may be actually "in flight" in some languages (e.g. if the
-	// exception
-	// is passed to a Context manager's `__exit__` method in Python) but will
-	// usually be caught at the point of recording the exception in most
-	// languages.
-	//
-	// It is usually not possible to determine at the point where an exception
-	// is thrown
-	// whether it will escape the scope of a span.
-	// However, it is trivial to know that an exception
-	// will escape, if one checks for an active exception just before ending
-	// the span,
-	// as done in the [example for recording span
-	// exceptions](#recording-an-exception).
-	//
-	// It follows that an exception may still escape the scope of the span
-	// even if the `exception.escaped` attribute was not set or set to false,
-	// since the event might have been recorded at a time where it was not
-	// clear whether the exception will escape.
-	ExceptionEscapedKey = attribute.Key("exception.escaped")
-
-	// ExceptionMessageKey is the attribute Key conforming to the
-	// "exception.message" semantic conventions. It represents the exception
-	// message.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Division by zero', "Can't convert 'int' object to str
-	// implicitly"
-	ExceptionMessageKey = attribute.Key("exception.message")
-
-	// ExceptionStacktraceKey is the attribute Key conforming to the
-	// "exception.stacktrace" semantic conventions. It represents a stacktrace
-	// as a string in the natural representation for the language runtime. The
-	// representation is to be determined and documented by each language SIG.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
-	// exception\\n at '
-	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
-	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
-	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
-	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
-	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
-	// semantic conventions. It represents the type of the exception (its
-	// fully-qualified class name, if applicable). The dynamic type of the
-	// exception should be preferred over the static type in languages that
-	// support it.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'java.net.ConnectException', 'OSError'
-	ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to true if the
-// exception event is recorded at a point where it is known that the exception
-// is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
-	return ExceptionEscapedKey.Bool(val)
-}
-
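// Illustrative usage sketch, not part of the vendored file: recording an
// exception event with the attributes above. Assumes this package is imported
// as `semconv`, `trace` is go.opentelemetry.io/otel/trace, and `span` is an
// active Span; the literal values are placeholders.
//
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionType("java.net.ConnectException"),
//		semconv.ExceptionMessage("Connection refused"),
//		semconv.ExceptionEscaped(true),
//	))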
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
-	return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
-	return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
-	return ExceptionTypeKey.String(val)
-}
-
-// Semantic convention attributes in the HTTP namespace.
-const (
-	// HTTPRequestBodySizeKey is the attribute Key conforming to the
-	// "http.request.body.size" semantic conventions. It represents the size of
-	// the request payload body in bytes. This is the number of bytes
-	// transferred excluding headers and is often, but not always, present as
-	// the
-	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-	// header. For requests using transport encoding, this should be the
-	// compressed size.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3495
-	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
-	// HTTPRequestMethodKey is the attribute Key conforming to the
-	// "http.request.method" semantic conventions. It represents the HTTP
-	// request method.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'GET', 'POST', 'HEAD'
-	// Note: HTTP request method value SHOULD be "known" to the
-	// instrumentation.
-	// By default, this convention defines "known" methods as the ones listed
-	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
-	// and the PATCH method defined in
-	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
-	//
-	// If the HTTP request method is not known to instrumentation, it MUST set
-	// the `http.request.method` attribute to `_OTHER`.
-	//
-	// If the HTTP instrumentation could end up converting valid HTTP request
-	// methods to `_OTHER`, then it MUST provide a way to override
-	// the list of known HTTP methods. If this override is done via environment
-	// variable, then the environment variable MUST be named
-	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
-	// list of case-sensitive known HTTP methods
-	// (this list MUST be a full override of the default known method, it is
-	// not a list of known methods in addition to the defaults).
-	//
-	// HTTP method names are case-sensitive and `http.request.method` attribute
-	// value MUST match a known HTTP method name exactly.
-	// Instrumentations for specific web frameworks that consider HTTP methods
-	// to be case insensitive, SHOULD populate a canonical equivalent.
-	// Tracing instrumentations that do so, MUST also set
-	// `http.request.method_original` to the original value.
-	HTTPRequestMethodKey = attribute.Key("http.request.method")
-
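// Illustrative usage sketch, not part of the vendored file: mapping a raw
// method string onto the enum above, falling back to `_OTHER` as the Note
// requires. Assumes this package is imported as `semconv`, `method` holds the
// raw request method, and `span` is an active go.opentelemetry.io/otel/trace
// Span.
//
//	attr := semconv.HTTPRequestMethodOther
//	switch method {
//	case "GET":
//		attr = semconv.HTTPRequestMethodGet
//	case "POST":
//		attr = semconv.HTTPRequestMethodPost
//	}
//	span.SetAttributes(attr, semconv.HTTPRequestMethodOriginal(method))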
-	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
-	// "http.request.method_original" semantic conventions. It represents the
-	// original HTTP method sent by the client in the request line.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'GeT', 'ACL', 'foo'
-	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
-	// HTTPRequestResendCountKey is the attribute Key conforming to the
-	// "http.request.resend_count" semantic conventions. It represents the
-	// ordinal number of the request resending attempt (for any reason,
-	// including redirects).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 3
-	// Note: The resend count SHOULD be updated each time an HTTP request gets
-	// resent by the client, regardless of the cause of the resending
-	// (e.g. redirection, authorization failure, 503 Server Unavailable,
-	// network issues, or any other).
-	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
-	// HTTPResponseBodySizeKey is the attribute Key conforming to the
-	// "http.response.body.size" semantic conventions. It represents the size
-	// of the response payload body in bytes. This is the number of bytes
-	// transferred excluding headers and is often, but not always, present as
-	// the
-	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-	// header. For requests using transport encoding, this should be the
-	// compressed size.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3495
-	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
-	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
-	// "http.response.status_code" semantic conventions. It represents the
-	// [HTTP response status
-	// code](https://tools.ietf.org/html/rfc7231#section-6).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 200
-	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
-	// HTTPRouteKey is the attribute Key conforming to the "http.route"
-	// semantic conventions. It represents the matched route, that is, the path
-	// template in the format used by the respective server framework.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
-	// Note: MUST NOT be populated when this is not supported by the HTTP
-	// server framework as the route attribute should have low-cardinality and
-	// the URI path can NOT substitute it.
-	// SHOULD include the [application
-	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. 
-const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client_id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client_id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. 
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
-	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
-	// to the "messaging.destination_publish.anonymous" semantic conventions.
-	// It represents a boolean that is true if the publish message destination
-	// is anonymous (could be unnamed or have auto-generated name).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
-	// MessagingDestinationPublishNameKey is the attribute Key conforming to
-	// the "messaging.destination_publish.name" semantic conventions. It
-	// represents the name of the original destination the message was
-	// published to.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyQueue', 'MyTopic'
-	// Note: The name SHOULD uniquely identify a specific queue, topic, or
-	// other entity within the broker. If
-	// the broker doesn't have such notion, the original destination name
-	// SHOULD uniquely identify the broker.
-	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
-	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
-	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
-	// It represents the ordering key for a given message. If the attribute is
-	// not present, the message does not have an ordering key.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ordering_key'
-	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-
-	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
-	// "messaging.kafka.consumer.group" semantic conventions. It represents the
-	// name of the Kafka Consumer Group that is handling the message. Only
-	// applies to consumers, not producers.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-group'
-	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
-	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
-	// the "messaging.kafka.destination.partition" semantic conventions. It
-	// represents the partition the message is sent to.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 2
-	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
-	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
-	// "messaging.kafka.message.key" semantic conventions. It represents the
-	// message key in Kafka; message keys are used for grouping alike messages
-	// to ensure they're processed on the same partition. They differ from
-	// `messaging.message.id` in that they're not unique. If the key is `null`,
-	// the attribute MUST NOT be set.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myKey'
-	// Note: If the key type is not string, its string representation has to
-	// be supplied for the attribute. If the key has no unambiguous, canonical
-	// string form, don't include its value.
-	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
-	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
-	// "messaging.kafka.message.offset" semantic conventions. It represents the
-	// offset of a record in the corresponding Kafka partition.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 42
-	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
-	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
-	// "messaging.kafka.message.tombstone" semantic conventions. It represents
-	// a boolean that is true if the message is a tombstone.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-
-	// MessagingMessageBodySizeKey is the attribute Key conforming to the
-	// "messaging.message.body.size" semantic conventions. It represents the
-	// size of the message body in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1439
-	// Note: This can refer to either the compressed or uncompressed body size.
-	// If both sizes are known, the uncompressed
-	// body size should be used.
-	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
-	// MessagingMessageConversationIDKey is the attribute Key conforming to the
-	// "messaging.message.conversation_id" semantic conventions. It represents
-	// the conversation ID identifying the conversation to which the message
-	// belongs, represented as a string. Sometimes called "Correlation ID".
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyConversationID'
-	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
-	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
-	// "messaging.message.envelope.size" semantic conventions. It represents
-	// the size of the message body and metadata in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 2738
-	// Note: This can refer to either the compressed or uncompressed size. If
-	// both sizes are known, the uncompressed
-	// size should be used.
-	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
-	// MessagingMessageIDKey is the attribute Key conforming to the
-	// "messaging.message.id" semantic conventions. It represents a value used
-	// by the messaging system as an identifier for the message, represented as
-	// a string.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
-	MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
-	// MessagingOperationKey is the attribute Key conforming to the
-	// "messaging.operation" semantic conventions. It represents a string
-	// identifying the kind of messaging operation.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: If a custom value is used, it MUST be of low cardinality.
-	MessagingOperationKey = attribute.Key("messaging.operation")
-
-	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
-	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-	// conventions. It represents the RabbitMQ message routing key.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents an identifier for
- // the messaging system being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
- // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
- MessagingOperationPublish = MessagingOperationKey.String("publish")
- // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
- MessagingOperationCreate = MessagingOperationKey.String("create")
- // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs
- MessagingOperationDeliver = MessagingOperationKey.String("deliver")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Apache ActiveMQ
- MessagingSystemActivemq = MessagingSystemKey.String("activemq")
- // Amazon Simple Queue Service (SQS)
- MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
- // Azure Event Grid
- MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
- // Azure Event Hubs
- MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
- // Azure Service Bus
- MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
- // Google Cloud Pub/Sub
- MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
- // Java Message Service
- MessagingSystemJms = MessagingSystemKey.String("jms")
- // Apache Kafka
- MessagingSystemKafka = MessagingSystemKey.String("kafka")
- // RabbitMQ
- MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
- // Apache RocketMQ
- MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client_id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
- return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
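For orientation between the removed hunks: these helpers are thin wrappers that turn a value into an attribute.KeyValue. A minimal sketch of a producer span using them, assuming the v1.24.0 semconv import path for the vendored package deleted here; the tracer name, span name, helper name publishOrder, and all values are illustrative only:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version of the package in this diff
	"go.opentelemetry.io/otel/trace"
)

// publishOrder starts a producer span annotated with the messaging
// attributes defined in this file; names and values are examples only.
func publishOrder(ctx context.Context, key string) {
	_, span := otel.Tracer("example/kafka-producer").Start(ctx, "orders publish",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,      // enum member from the var block above
			semconv.MessagingOperationPublish, // "publish" operation
			semconv.MessagingDestinationName("orders"),
			semconv.MessagingKafkaMessageKey(key),
		))
	defer span.End()
	// ... hand the message to the Kafka client here ...
}
```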
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
- return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
- return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka are used for grouping alike messages to ensure they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. 
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // version of the protocol specified in `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. 
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
- // NetworkTransportKey is the attribute Key conforming to the
- // "network.transport" semantic conventions. It represents the [OSI
- // transport layer](https://osi-model.com/transport-layer/) or
- // [inter-process communication
- // method](https://wikipedia.org/wiki/Inter-process_communication).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tcp', 'udp'
- // Note: The value SHOULD be normalized to lowercase.
- //
- // Consider always setting the transport when setting a port number, since
- // a port number is ambiguous without knowing the transport. For example
- // different processes could be listening on TCP port 12345 and UDP port
- // 12345.
- NetworkTransportKey = attribute.Key("network.transport")
-
- // NetworkTypeKey is the attribute Key conforming to the "network.type"
- // semantic conventions. It represents the [OSI network
- // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ipv4', 'ipv6'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
- // GPRS
- NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
- // EDGE
- NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
- // UMTS
- NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
- // CDMA
- NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
- // IDEN
- NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
- // EHRPD
- NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
- // GSM
- NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
- // wifi
- NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
- // wired
- NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
- // cell
- NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
- // unavailable
- NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
- // unknown
- NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
- // transmit
- NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
- // receive
- NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
- // TCP
- NetworkTransportTCP = NetworkTransportKey.String("tcp")
- // UDP
- NetworkTransportUDP = NetworkTransportKey.String("udp")
- // Named or anonymous pipe
- NetworkTransportPipe = NetworkTransportKey.String("pipe")
- // Unix domain socket
- NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
- // IPv4
- NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
- // IPv6
- NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
- return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
- return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
- return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
- return NetworkCarrierNameKey.String(val)
-}
-
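The network enum members and carrier constructors above combine the same way. A minimal sketch, again assuming the v1.24.0 import path; recordConnection is a hypothetical helper and the concrete values are examples only:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version of the package in this diff
	"go.opentelemetry.io/otel/trace"
)

// recordConnection attaches the network attributes defined above to an
// existing span; the concrete values are examples only.
func recordConnection(span trace.Span) {
	span.SetAttributes(
		semconv.NetworkTransportTCP, // enum member from the var block above
		semconv.NetworkTypeIpv4,
		semconv.NetworkPeerAddress("10.1.2.80"),
		semconv.NetworkPeerPort(65123),
		semconv.NetworkProtocolName("http"),
		semconv.NetworkProtocolVersion("1.1"),
	)
}
```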
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
- return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
- return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
- return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
- return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
- return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the version
-// of the protocol specified in `network.protocol.name`.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
- return NetworkProtocolVersionKey.String(val)
-}
-
-// Attributes for remote procedure calls.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // doesn't specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
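The RPC enum members and constructors above can be attached to a client span in one call. A minimal sketch under the same v1.24.0 import-path assumption; annotateGRPCCall is a hypothetical helper and the service/method names are examples only:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version of the package in this diff
	"go.opentelemetry.io/otel/trace"
)

// annotateGRPCCall records the RPC attributes defined above on a client
// span; service and method names are examples only.
func annotateGRPCCall(span trace.Span) {
	span.SetAttributes(
		semconv.RPCSystemGRPC,                       // enum member from the var block above
		semconv.RPCService("myservice.EchoService"), // logical service name
		semconv.RPCMethod("Echo"),                   // must match the $method part of the span name
		semconv.RPCGRPCStatusCodeOk,                 // numeric gRPC status 0
	)
}
```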
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ServerAddressKey is the attribute Key conforming to the "server.address"
- // semantic conventions. It represents the server domain name if available
- // without reverse DNS lookup; otherwise, IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.address` SHOULD represent the server address
- // behind any intermediaries, for example proxies, if it's available.
- ServerAddressKey = attribute.Key("server.address")
-
- // ServerPortKey is the attribute Key conforming to the "server.port"
- // semantic conventions. It represents the server port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.port` SHOULD represent the server port behind
- // any intermediaries, for example proxies, if it's available.
- ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
- return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
- return ServerPortKey.Int(val)
-}
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // SourceAddressKey is the attribute Key conforming to the "source.address"
- // semantic conventions. It represents the source address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the destination side, and when communicating
- // through an intermediary, `source.address` SHOULD represent the source
- // address behind any intermediaries, for example proxies, if it's
- // available.
- SourceAddressKey = attribute.Key("source.address")
-
- // SourcePortKey is the attribute Key conforming to the "source.port"
- // semantic conventions. It represents the source port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
- return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number
-func SourcePort(val int) attribute.KeyValue {
- return SourcePortKey.Int(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
- // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
- // semantic conventions. It represents the string indicating the
- // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
- // used during the current connection.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
- // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
- // Note: The values allowed for `tls.cipher` MUST be one of the
- // `Descriptions` of the [registered TLS Cipher
- // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
- TLSCipherKey = attribute.Key("tls.cipher")
-
- // TLSClientCertificateKey is the attribute Key conforming to the
- // "tls.client.certificate" semantic conventions. It represents the
- // pEM-encoded stand-alone certificate offered by the client. This is
- // usually mutually-exclusive of `client.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
- // TLSClientCertificateChainKey is the attribute Key conforming to the
- // "tls.client.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the client. This is usually mutually-exclusive of
- // `client.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
- // TLSClientHashMd5Key is the attribute Key conforming to the
- // "tls.client.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
- // TLSClientHashSha1Key is the attribute Key conforming to the
- // "tls.client.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
- // TLSClientHashSha256Key is the attribute Key conforming to the
- // "tls.client.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
- // TLSClientIssuerKey is the attribute Key conforming to the
- // "tls.client.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
- // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
- // semantic conventions. It represents a hash that identifies clients based
- // on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
- // TLSClientNotAfterKey is the attribute Key conforming to the
- // "tls.client.not_after" semantic conventions. It represents the date/Time
- // indicating when client certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
- // TLSClientNotBeforeKey is the attribute Key conforming to the
- // "tls.client.not_before" semantic conventions. It represents the
- // date/Time indicating when client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the also
- // called an SNI, this tells the server which hostname to which the client
- // is attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions. It represents the numeric
- // part of the version parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2', '3'
- TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
- // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
- // semantic conventions. It represents the boolean flag indicating if this
- // TLS connection was resumed from an existing TLS negotiation.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSResumedKey = attribute.Key("tls.resumed")
-
- // TLSServerCertificateKey is the attribute Key conforming to the
- // "tls.server.certificate" semantic conventions. It represents the
- // pEM-encoded stand-alone certificate offered by the server. This is
- // usually mutually-exclusive of `server.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
- // TLSServerCertificateChainKey is the attribute Key conforming to the
- // "tls.server.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the server. This is usually mutually-exclusive of
- // `server.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
- // TLSServerHashMd5Key is the attribute Key conforming to the
- // "tls.server.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
- // TLSServerHashSha1Key is the attribute Key conforming to the
- // "tls.server.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
- // TLSServerHashSha256Key is the attribute Key conforming to the
- // "tls.server.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
- // TLSServerIssuerKey is the attribute Key conforming to the
- // "tls.server.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
- // TLSServerJa3sKey is the attribute Key conforming to the
- // "tls.server.ja3s" semantic conventions. It represents a hash that
- // identifies servers based on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
- // TLSServerNotAfterKey is the attribute Key conforming to the
- // "tls.server.not_after" semantic conventions. It represents the date/Time
- // indicating when server certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
- // TLSServerNotBeforeKey is the attribute Key conforming to the
- // "tls.server.not_before" semantic conventions. It represents the
- // date/Time indicating when server certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
- // TLSServerSubjectKey is the attribute Key conforming to the
- // "tls.server.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
- TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
- // ssl
- TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
- // tls
- TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
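The TLS attributes follow the same pattern. A minimal sketch under the same v1.24.0 import-path assumption; recordTLS is a hypothetical helper and the cipher and version strings are examples only:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version of the package in this diff
	"go.opentelemetry.io/otel/trace"
)

// recordTLS attaches a few of the TLS attributes defined above to a span;
// the cipher and version strings are examples only.
func recordTLS(span trace.Span) {
	span.SetAttributes(
		semconv.TLSEstablished(true),
		semconv.TLSProtocolNameTLS, // enum member from the var block above
		semconv.TLSProtocolVersion("1.3"),
		semconv.TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
	)
}
```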
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. 
It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the also called -// an SNI, this tells the server which hostname to which the client is -// attempting to connect to. -func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. 
It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. 
-func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. - URLFullKey = attribute.Key("url.full") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - URLPathKey = attribute.Key("url.path") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") -) - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. 
It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionIDKey = attribute.Key("session.id")
-
- // SessionPreviousIDKey is the attribute Key conforming to the
- // "session.previous_id" semantic conventions. It represents the previous
- // `session.id` for this user, when known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
- return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
- return SessionPreviousIDKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
deleted file mode 100644
index d27e8a8f8b..0000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.24.0
-// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
deleted file mode 100644
index 6c019aafc3..0000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This event represents an occurrence of a lifecycle transition on the iOS
-// platform.
-const (
- // IosStateKey is the attribute Key conforming to the "ios.state" semantic
- // conventions. It represents the state the application has transitioned
- // into at the occurrence of the event.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
- // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
- // and from which the `OS terminology` column values are derived.
- IosStateKey = attribute.Key("ios.state")
-)
-
-var (
- // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
- IosStateActive = IosStateKey.String("active")
- // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
- IosStateInactive = IosStateKey.String("inactive")
- // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
- IosStateBackground = IosStateKey.String("background")
- // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
- IosStateForeground = IosStateKey.String("foreground")
- // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
- IosStateTerminate = IosStateKey.String("terminate")
-)
-
-// This event represents an occurrence of a lifecycle transition on the Android
-// platform.
-const (
- // AndroidStateKey is the attribute Key conforming to the "android.state"
- // semantic conventions. It represents the state the application has
- // transitioned into at the occurrence of the event.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Note: The Android lifecycle states are defined in [Activity lifecycle
- // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
- // and from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
- // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
- AndroidStateCreated = AndroidStateKey.String("created")
- // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
- AndroidStateBackground = AndroidStateKey.String("background")
- // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
- AndroidStateForeground = AndroidStateKey.String("foreground")
-)
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It SHOULD be a semantic
- // identifier for a value. If one is unavailable, a stringified version of
- // the value can be used.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
-// identifier for a value. If one is unavailable, a stringified version of the
-// value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// RPC received/sent message.
-const (
- // MessageCompressedSizeKey is the attribute Key conforming to the
- // "message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // MessageIDKey is the attribute Key conforming to the "message.id"
- // semantic conventions. It MUST be calculated as two different counters
- // starting from `1`, one for sent messages and one for received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
-
- // MessageTypeKey is the attribute Key conforming to the "message.type"
- // semantic conventions. It represents whether this is a received or sent
- // message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessageTypeKey = attribute.Key("message.type")
-
- // MessageUncompressedSizeKey is the attribute Key conforming to the
- // "message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
- return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
-// semantic conventions. It MUST be calculated as two different counters
-// starting from `1`, one for sent messages and one for received messages.
-func MessageID(val int) attribute.KeyValue {
- return MessageIDKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions.
It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go deleted file mode 100644 index 7235bb51d9..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go deleted file mode 100644 index a6b953f625..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go +++ /dev/null @@ -1,1071 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -const ( - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. 
- // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It represents the - // time between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Experimental - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Experimental - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." 
-
- // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
- // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
- // represents the number of requests that are currently active on the server
- // that hold a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
- AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
- AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
-
- // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
- // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
- // represents the duration of rate limiting lease held by requests on the
- // server.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
- AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
- AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
-
- // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
- // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
- // represents the time the request spent in a queue waiting to acquire a rate
- // limiting lease.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
- AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
- AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
- // represents the number of requests that are currently queued, waiting to
- // acquire a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Experimental
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It measures the time taken to perform a DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
- - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // HTTPClientRequestTimeInQueue is the metric conforming to the - // "http.client.request.time_in_queue" semantic conventions. It represents the - // amount of time requests spent on a queue waiting for an available - // connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" - HTTPClientRequestTimeInQueueUnit = "s" - HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Experimental - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." 
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
- // number of connections that are currently upgraded (WebSockets).
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Experimental
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It measures the duration of the function's logic
- // execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It measures the duration of the function's
- // initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. 
- // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." 
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the CPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." - - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // measures the duration of publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // measures the duration of receive operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingDeliverDuration is the metric conforming to the - // "messaging.deliver.duration" semantic conventions. It represents the - // measures the duration of deliver operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingDeliverDurationName = "messaging.deliver.duration" - MessagingDeliverDurationUnit = "s" - MessagingDeliverDurationDescription = "Measures the duration of deliver operation." - - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // measures the number of received messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingDeliverMessages is the metric conforming to the - // "messaging.deliver.messages" semantic conventions. It represents the - // measures the number of delivered messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingDeliverMessagesName = "messaging.deliver.messages" - MessagingDeliverMessagesUnit = "{message}" - MessagingDeliverMessagesDescription = "Measures the number of delivered messages." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. 
It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." 
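The system.* entries declared above as gauges map to asynchronous instruments in Go: the value is supplied by a callback at collection time rather than recorded inline. A short sketch under the same assumptions as the earlier example; registerCPUUtilization and the read parameter are illustrative names, not part of this package:

    // registerCPUUtilization wires system.cpu.utilization to a
    // caller-supplied sampler (e.g. a /proc/stat reader).
    func registerCPUUtilization(meter metric.Meter, read func() float64) error {
        _, err := meter.Float64ObservableGauge(
            semconv.SystemCPUUtilizationName,
            metric.WithUnit(semconv.SystemCPUUtilizationUnit),
            metric.WithDescription(semconv.SystemCPUUtilizationDescription),
            metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
                o.Observe(read()) // a fraction, matching the "1" unit above
                return nil
            }),
        )
        return err
    }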
- - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. 
- // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessesCount is the metric conforming to the - // "system.processes.count" semantic conventions. It represents the total - // number of processes in each state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessesCountName = "system.processes.count" - SystemProcessesCountUnit = "{process}" - SystemProcessesCountDescription = "Total number of processes in each state" - - // SystemProcessesCreated is the metric conforming to the - // "system.processes.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. - // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessesCreatedName = "system.processes.created" - SystemProcessesCreatedUnit = "{process}" - SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go deleted file mode 100644 index d66bbe9c23..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go +++ /dev/null @@ -1,2545 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerImageIDKey is the attribute Key conforming to the
- // "container.image.id" semantic conventions. It represents the runtime
- // specific image identifier. Usually a hash algorithm followed by a UUID.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
- // Note: Docker defines a sha256 of the image id; `container.image.id`
- // corresponds to the `Image` field from the Docker container inspect
- // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
- // endpoint.
- // K8S defines a link to the container registry repository with digest
- // `"imageID": "registry.azurecr.io
- // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
- // The ID is assigned by the container runtime and can vary in different
- // environments. Consider using `oci.manifest.digest` if it is important to
- // identify the same image in different environments/runtimes.
- ContainerImageIDKey = attribute.Key("container.image.id")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageRepoDigestsKey is the attribute Key conforming to the
- // "container.image.repo_digests" semantic conventions. It represents the
- // repo digests of the container image as provided by the container
- // runtime.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
- // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
- // Note:
- // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
- // and
- // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
- // report those under the `RepoDigests` field.
- ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
- // ContainerImageTagsKey is the attribute Key conforming to the
- // "container.image.tags" semantic conventions. It represents the container
- // image tags. An example can be found in [Docker Image
- // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
- // Should be only the `` section of the full name for example from
- // `registry.example.com/my-org/my-image:`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'v1.27.1', '3.5.7-0'
- ContainerImageTagsKey = attribute.Key("container.image.tags")
-
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. 
-func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. 
- DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. 
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
- // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
- // addresses MUST be specified in the [RFC
- // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
- HostIPKey = attribute.Key("host.ip")
-
- // HostMacKey is the attribute Key conforming to the "host.mac" semantic
- // conventions. It represents the available MAC addresses of the host,
- // excluding loopback interfaces.
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val int) attribute.KeyValue { - return HostCPUSteppingKey.Int(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. 
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
- // K8SClusterUIDKey is the attribute Key conforming to the
- // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
- // the cluster, set to the UID of the `kube-system` namespace.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
- // Note: K8S doesn't have support for obtaining a cluster ID.
If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. 
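For orientation, a minimal sketch of how the Kubernetes constructors above are typically consumed when building a resource. It assumes the otel-go resource SDK (go.opentelemetry.io/otel/sdk/resource); POD_NAMESPACE and POD_NAME are hypothetical variables a pod spec would inject via the downward API, and the cluster name is a fixed example value:

package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Stamp Kubernetes identity onto everything this process emits.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")), // downward API, hypothetical
		semconv.K8SPodName(os.Getenv("POD_NAME")),            // downward API, hypothetical
	)
	fmt.Println(res)
}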
-func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. 
It represents the digest of - // the OCI image manifest. For container images specifically is the digest - // by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. 
It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). 
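The process.* constructors map directly onto the standard library; a short sketch under the same import assumptions as the first sketch. In practice the SDK's resource.WithProcess() detector populates these (and more) automatically:

// Same imports as the first sketch ("os", the resource SDK, this semconv package).
func processResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),        // process.pid from the stdlib
		semconv.ProcessCommandArgs(os.Args...), // process.command_args, variadic helper
	)
}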
-func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
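Worth noting: enum-typed attributes such as aws.ecs.launchtype are exposed as predeclared KeyValue variables (AWSECSLaunchtypeEC2, AWSECSLaunchtypeFargate) rather than constructors, so they are used as-is next to the string helpers. A sketch, reusing the placeholder ARN from the examples above:

// Same resource-SDK and semconv imports as above.
func ecsResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		// String-typed attributes go through constructors.
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		// Enum-typed attributes are ready-made KeyValue values, used as-is.
		semconv.AWSECSLaunchtypeFargate,
	)
}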
-func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. 
It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Resource used by Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Resources used by Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// Heroku dyno metadata -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. 
It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// A serverless instance. -const ( - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. 
On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-)
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
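A sketch of how the faas.* constructors line up with the AWS Lambda guidance in the notes above, assuming the standard Lambda runtime environment variables:

// Same "os", resource-SDK, and semconv imports as above.
func lambdaResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME")),
		semconv.FaaSVersion(os.Getenv("AWS_LAMBDA_FUNCTION_VERSION")),
		// Per the note above, the Lambda log stream name doubles as faas.instance.
		semconv.FaaSInstance(os.Getenv("AWS_LAMBDA_LOG_STREAM_NAME")),
	)
}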
-func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. 
It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. 
-func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - // Deprecated: use the `otel.scope.name` attribute. - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - // Deprecated: use the `otel.scope.version` attribute. - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. -// -// Deprecated: use the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. -// -// Deprecated: use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go deleted file mode 100644 index fe80b1731d..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go deleted file mode 100644 index c1718234e5..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go +++ /dev/null @@ -1,1323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. 
It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. 
It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. 
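The `code.*` keys above follow the same constructor pattern; a short sketch of tagging a span with its source location, assuming an ambient span from `go.opentelemetry.io/otel/trace` and reusing the documented example values:

```go
package main

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateCodeLocation records where the traced operation lives in source,
// using the code.* helpers defined above.
func annotateCodeLocation(ctx context.Context) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.CodeNamespace("com.example.MyHTTPService"),
		semconv.CodeFunction("serveRequest"),
		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		semconv.CodeLineNumber(42),
	)
}
```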
It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. 
-func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. 
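The `cloudevents.*` helpers above are consumed the same way; a hedged sketch of annotating the span that covers one event's processing (attribute values are the documented examples, the function name is illustrative):

```go
package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordCloudEvent attaches the CloudEvents identity attributes described
// above to the span covering the event's processing.
func recordCloudEvent(span trace.Span) {
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("/cloudevents/spec/pull/123"),
		semconv.CloudeventsEventType("com.github.pull_request.opened"),
		semconv.CloudeventsEventSpecVersion("1.0"),
	)
}
```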
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") -) - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
-func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
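For the `faas.*` trigger helpers above, a minimal sketch of marking a scheduled (cron-style) invocation, again with the documented example values and an illustrative function name:

```go
package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordTimerInvocation marks a span for a scheduled function run, using
// the faas.* helpers defined above.
func recordTimerInvocation(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSCron("0/5 * * * ? *"),
		semconv.FaaSTime("2020-01-23T13:47:06Z"),
		semconv.FaaSColdstart(true),
		semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
	)
}
```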
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") -) - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. 
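A sketch of how an AWS SDK instrumentation would map DynamoDB `Query` request parameters onto the `aws.dynamodb.*` attributes above (values from the doc examples; the function name is an assumption):

```go
package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordDynamoDBQuery mirrors the request-parameter-to-attribute mapping
// described by the aws.dynamodb.* conventions above.
func recordDynamoDBQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBConsistentRead(true),
	)
}
```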
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. 
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000..ef85cb70c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. 
+ // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000..d3aa476ee1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. 
An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
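An aside on the RecordError contract spelled out above, before the SpanKind constants continue: recording an error attaches an exception event but leaves the span status untouched, so callers must pair it with SetStatus. A minimal sketch of the resulting idiom (the work helper is a hypothetical stand-in):

```go
package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// work is a hypothetical stand-in for the traced operation.
func work(ctx context.Context) error { return errors.New("boom") }

// finish records a failure as an exception event and, separately, marks
// the span status as Error; RecordError alone would leave the status Unset.
func finish(ctx context.Context, span trace.Span) {
	if err := work(ctx); err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		return
	}
	// Per the SetStatus docs above, the description is ignored for non-error codes.
	span.SetStatus(codes.Ok, "")
}
```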
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 28877d4ab4..d49adf671b 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
 
 import (
 	"bytes"
-	"context"
 	"encoding/hex"
 	"encoding/json"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 const (
@@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
 		Remote: sc.remote,
 	})
 }
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.Span
-
-	// End completes the Span. The Span is considered complete and ready to be
-	// delivered through the rest of the telemetry pipeline after this method
-	// is called. Therefore, updates to the Span are not allowed after this
-	// method has been called.
-	End(options ...SpanEndOption)
-
-	// AddEvent adds an event with the provided name and options.
-	AddEvent(name string, options ...EventOption)
-
-	// AddLink adds a link.
- // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. 
SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. 
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000..77952d2a0b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. 
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index 20b5cf2433..dc5e34cad0 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string {
 	return ""
 }
 
+// Walk walks all key value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
+
 // Insert adds a new list-member defined by the key/value pair to the
 // TraceState. If a list-member already exists for the given key, that
 // list-member's value is updated. The new or updated list-member is always
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 0000000000..c9b7cdbbfe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
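One aside before the script body continues below: the TraceState.Walk method added in the tracestate.go hunk above enables early-exit iteration over list members without materializing them. A minimal sketch (the function and key names are illustrative):

```go
package example

import "go.opentelemetry.io/otel/trace"

// vendorValue returns the value stored under wanted, stopping the walk
// at the first match instead of visiting every list member.
func vendorValue(ts trace.TraceState, wanted string) (val string, ok bool) {
	ts.Walk(func(key, value string) bool {
		if key == wanted {
			val, ok = value, true
			return false // stop iterating
		}
		return true // keep going
	})
	return val, ok
}
```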
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ab28960524..f67039ed1f 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.28.0"
+	return "1.29.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 241cfc82a8..3ba611d713 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
 module-sets:
   stable-v1:
-    version: v1.28.0
+    version: v1.29.0
     modules:
       - go.opentelemetry.io/otel
      - go.opentelemetry.io/otel/bridge/opencensus
@@ -29,15 +29,16 @@ module-sets:
       - go.opentelemetry.io/otel/sdk/metric
       - go.opentelemetry.io/otel/trace
   experimental-metrics:
-    version: v0.50.0
+    version: v0.51.0
     modules:
       - go.opentelemetry.io/otel/example/prometheus
       - go.opentelemetry.io/otel/exporters/prometheus
   experimental-logs:
-    version: v0.4.0
+    version: v0.5.0
     modules:
       - go.opentelemetry.io/otel/log
       - go.opentelemetry.io/otel/sdk/log
+      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
       - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
   experimental-schema:
@@ -46,4 +47,3 @@ module-sets:
       - go.opentelemetry.io/otel/schema
 excluded-modules:
   - go.opentelemetry.io/otel/internal/tools
-  - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index fe19e8f97a..aa69fb4d50 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -719,6 +719,8 @@ type PythonSettings struct {
 
 	// Some settings.
 	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+	// Experimental features to be included during client library generation.
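An aside on the ExperimentalFeatures field added here and the accessors that follow a few hunks below: protobuf-generated getters return their zero value on a nil receiver, so chained access is nil-safe end to end. A sketch, assuming the conventional annotations package import path for this generated file:

```go
package example

import "google.golang.org/genproto/googleapis/api/annotations"

// restAsyncEnabled reports whether async REST generation is requested.
// Both GetExperimentalFeatures and GetRestAsyncIoEnabled return their
// zero value on a nil receiver, so no intermediate nil checks are needed.
func restAsyncEnabled(s *annotations.PythonSettings) bool {
	return s.GetExperimentalFeatures().GetRestAsyncIoEnabled()
}
```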
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -1114,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1142,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1155,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1460,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 
0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 
0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 
0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 
0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 
0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1601,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + 
(*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1645,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 
32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1816,7 +1892,19 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1835,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781a..5d4096d46a 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. 
-- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705..abab279379 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50c..d7b40b7cb6 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
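The DefaultConfig values documented above are consumed through grpc.ConnectParams. A minimal sketch of the caller side, assuming a placeholder target and insecure credentials (neither is part of this patch):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func newClient() (*grpc.ClientConn, error) {
	// Reuse the documented defaults, overriding only the connect timeout.
	return grpc.NewClient(
		"dns:///example.internal:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           backoff.DefaultConfig,
			MinConnectTimeout: 20 * time.Second,
		}),
	)
}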
 //
 // This should be useful for callers who want to configure backoff with
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index f391744f72..b181f386a1 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
 	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
+	estats "google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/metadata"
@@ -72,8 +73,21 @@ func unregisterForTesting(name string) {
 	delete(m, name)
 }
 
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+	return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+	scs.connectedAddress = addr
+}
+
 func init() {
 	internal.BalancerUnregister = unregisterForTesting
+	internal.ConnectedAddress = connectedAddress
+	internal.SetConnectedAddress = setConnectedAddress
 }
 
 // Get returns the resolver builder registered with the given name.
@@ -243,6 +257,10 @@ type BuildOptions struct {
 	// same resolver.Target as passed to the resolver. See the documentation for
 	// the resolver.Target type for details about what it contains.
 	Target resolver.Target
+	// MetricsRecorder is the metrics recorder that balancers can use to record
+	// metrics. Balancer implementations that do not register metrics with the
+	// metrics registry and record on them can ignore this field.
+	MetricsRecorder estats.MetricsRecorder
 }
 
 // Builder creates a balancer.
@@ -410,6 +428,9 @@ type SubConnState struct {
 	// ConnectionError is set if the ConnectivityState is TransientFailure,
 	// describing the reason the SubConn failed. Otherwise, it is nil.
 	ConnectionError error
+	// connectedAddress contains the connected address when ConnectivityState
+	// is Ready. Otherwise, it is indeterminate.
+	connectedAddress resolver.Address
 }
 
 // ClientConnState describes the state of a ClientConn relevant to the
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 07527603f1..5b592f48ad 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
 		// Endpoints not set, process addresses until we migrate resolver
 		// emissions fully to Endpoints. The top channel does wrap emitted
 		// addresses with endpoints, however some balancers such as weighted
-		// target do not forwarrd the corresponding correct endpoints down/split
+		// target do not forward the corresponding correct endpoints down/split
 		// endpoints properly. Once all balancers correctly forward endpoints
 		// down, can delete this else conditional.
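For context on the migration that comment describes, a sketch of the two resolver state shapes involved; the addresses are placeholders, not values from this patch:

package main

import "google.golang.org/grpc/resolver"

// exampleStates shows the legacy flat-address emission next to the
// endpoint-based emission that balancers are migrating toward.
func exampleStates() (legacy, endpointBased resolver.State) {
	legacy = resolver.State{
		Addresses: []resolver.Address{
			{Addr: "10.0.0.1:443"},
			{Addr: "10.0.0.2:443"},
		},
	}
	endpointBased = resolver.State{
		Endpoints: []resolver.Endpoint{
			// Each endpoint groups the addresses of a single backend.
			{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}},
			{Addresses: []resolver.Address{{Addr: "10.0.0.2:443"}}},
		},
	}
	return legacy, endpointBased
}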
 		addrs = state.ResolverState.Addresses
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 4161fdf47a..6561b769eb 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -25,12 +25,15 @@ import (
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
 )
 
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
 // ccBalancerWrapper sits between the ClientConn and the Balancer.
 //
 // ccBalancerWrapper implements methods corresponding to the ones on the
@@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
 			CustomUserAgent: cc.dopts.copts.UserAgent,
 			ChannelzParent:  cc.channelz,
 			Target:          cc.parsedTarget,
+			MetricsRecorder: cc.metricsRecorderList,
 		},
 		serializer:       grpcsync.NewCallbackSerializer(ctx),
 		serializerCancel: cancel,
@@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
 // it is safe to call into the balancer here.
 func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
 	errCh := make(chan error)
-	ok := ccb.serializer.Schedule(func(ctx context.Context) {
+	uccs := func(ctx context.Context) {
 		defer close(errCh)
 		if ctx.Err() != nil || ccb.balancer == nil {
 			return
@@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
 			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
 		}
 		errCh <- err
-	})
-	if !ok {
-		return nil
 	}
+	onFailure := func() { close(errCh) }
+
+	// UpdateClientConnState can race with Close, and when the latter wins, the
+	// serializer is closed, and the attempt to schedule the callback will fail.
+	// It is acceptable to ignore this failure. But since we want to handle the
+	// state update in a blocking fashion (when we successfully schedule the
+	// callback), we have to use the ScheduleOr method and not the TrySchedule
+	// method on the serializer.
+	ccb.serializer.ScheduleOr(uccs, onFailure)
 	return <-errCh
 }
 
 // resolverError is invoked by grpc to push a resolver error to the underlying
 // balancer. The call to the balancer is executed from the serializer.
 func (ccb *ccBalancerWrapper) resolverError(err error) {
-	ccb.serializer.Schedule(func(ctx context.Context) {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
 		if ctx.Err() != nil || ccb.balancer == nil {
 			return
 		}
@@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() {
 	ccb.closed = true
 	ccb.mu.Unlock()
 	channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
-	ccb.serializer.Schedule(func(context.Context) {
+	ccb.serializer.TrySchedule(func(context.Context) {
 		if ccb.balancer == nil {
 			return
 		}
@@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() {
 
 // exitIdle invokes the balancer's exitIdle method in the serializer.
 func (ccb *ccBalancerWrapper) exitIdle() {
-	ccb.serializer.Schedule(func(ctx context.Context) {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
 		if ctx.Err() != nil || ccb.balancer == nil {
 			return
 		}
@@ -252,15 +262,29 @@ type acBalancerWrapper struct {
 
 // updateState is invoked by grpc to push a subConn state update to the
 // underlying balancer.
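The ScheduleOr switch above exists so a caller blocked on errCh is always unblocked, even when the wrapper is closed concurrently. A standalone sketch of that pattern; this is not the grpcsync.CallbackSerializer implementation, just the shape of the contract:

package main

import "sync"

// serializer is a toy stand-in for a callback serializer: tasks are queued
// and run one at a time by a single worker goroutine (not shown here).
type serializer struct {
	mu     sync.Mutex
	closed bool
	queue  []func()
}

// scheduleOr queues task, or runs onFailure on the caller's goroutine when
// the serializer is already closed. Exactly one of the two callbacks runs,
// so a caller waiting on a channel closed by either of them never hangs.
func (s *serializer) scheduleOr(task, onFailure func()) {
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		onFailure()
		return
	}
	s.queue = append(s.queue, task)
	s.mu.Unlock()
}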
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4fe..fcd1cfe802 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b43b..9c8850e3fd 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager + target string // User's dial target. 
+	parsedTarget        resolver.Target    // See initParsedTargetAndResolverBuilder().
+	authority           string             // See initAuthority().
+	dopts               dialOptions        // Default and user specified dial options.
+	channelz            *channelz.Channel  // Channelz object.
+	resolverBuilder     resolver.Builder   // See initParsedTargetAndResolverBuilder().
+	idlenessMgr         *idle.Manager
+	metricsRecorderList *stats.MetricsRecorderList
 
 	// The following provide their own synchronization, and therefore don't
 	// require cc.mu to be held to access them.
@@ -626,11 +632,6 @@ type ClientConn struct {
 
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
 // ctx expires. A true value is returned in former case and false in latter.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
 func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
 	ch := cc.csMgr.getNotifyChan()
 	if cc.csMgr.getState() != sourceState {
@@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
 }
 
 // GetState returns the connectivity.State of ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
-// release.
 func (cc *ClientConn) GetState() connectivity.State {
 	return cc.csMgr.getState()
 }
@@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
 	cc.csMgr.updateState(connectivity.TransientFailure)
 }
 
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
 	out := make([]resolver.Address, len(in))
-	for i := range in {
-		out[i] = in[i]
-		out[i].BalancerAttributes = nil
-	}
+	copy(out, in)
 	return out
 }
 
@@ -835,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
 	}
 
 	ac := &addrConn{
-		state:        connectivity.Idle,
-		cc:           cc,
-		addrs:        copyAddressesWithoutBalancerAttributes(addrs),
-		scopts:       opts,
-		dopts:        cc.dopts,
-		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
-		resetBackoff: make(chan struct{}),
-		stateChan:    make(chan struct{}),
+		state:          connectivity.Idle,
+		cc:             cc,
+		addrs:          copyAddresses(addrs),
+		scopts:         opts,
+		dopts:          cc.dopts,
+		channelz:       channelz.RegisterSubChannel(cc.channelz, ""),
+		resetBackoff:   make(chan struct{}),
+		stateReadyChan: make(chan struct{}),
 	}
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
 	// Start with our address set to the first address; this may be updated if
@@ -918,28 +908,29 @@ func (ac *addrConn) connect() error {
 		ac.mu.Unlock()
 		return nil
 	}
-	ac.mu.Unlock()
-	ac.resetTransport()
+	ac.resetTransportAndUnlock()
 	return nil
 }
 
-func equalAddresses(a, b []resolver.Address) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i, v := range a {
-		if !v.Equal(b[i]) {
-			return false
-		}
-	}
-	return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -947,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -966,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -992,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1190,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. 
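resetTransportAndUnlock adopts the lock-transfer convention its new doc comment spells out. A generic sketch of the idiom; the names here are invented for illustration:

package main

import "sync"

type worker struct {
	mu   sync.Mutex
	busy bool
}

// startAndUnlock must be called with w.mu held; it releases the lock on
// every path, so the caller never unlocks after calling it.
func (w *worker) startAndUnlock() {
	if w.busy {
		w.mu.Unlock() // early-exit path still releases the lock
		return
	}
	w.busy = true
	w.mu.Unlock() // release before the long-running work below
	// ... long-running work happens here without holding w.mu ...
}

func (w *worker) kick() {
	w.mu.Lock()
	w.startAndUnlock() // ownership of w.mu transfers to the callee
}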
+func (ac *addrConn) resetTransportAndUnlock() {
 	acCtx := ac.ctx
 	if acCtx.Err() != nil {
 		ac.mu.Unlock()
@@ -1522,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
 func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
 	for ctx.Err() == nil {
 		ac.mu.Lock()
-		t, state, sc := ac.transport, ac.state, ac.stateChan
+		t, state, sc := ac.transport, ac.state, ac.stateReadyChan
 		ac.mu.Unlock()
 		if state == connectivity.Ready {
 			return t, nil
@@ -1585,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) {
 	} else {
 		// Hard close the transport when the channel is entering idle or is
 		// being shutdown. In the case where the channel is being shutdown,
-		// closing of transports is also taken care of by cancelation of cc.ctx.
+		// closing of transports is also taken care of by cancellation of cc.ctx.
 		// But in the case where the channel is entering idle, we need to
 		// explicitly close the transports here. Instead of distinguishing
 		// between these two cases, it is simpler to close the transport
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 411e3dfd47..e840858b77 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
 import (
 	"google.golang.org/grpc/encoding"
 	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+	"google.golang.org/grpc/mem"
 )
 
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry
+// in the encoding package and is not part of the core functionality.
 type baseCodec interface {
-	Marshal(v any) ([]byte, error)
-	Unmarshal(data []byte, v any) error
+	Marshal(v any) (mem.BufferSlice, error)
+	Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise, it returns the result of encoding.GetCodecV2,
+// which is nil if no CodecV2 is registered for the name.
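Because both lookups read the same registeredCodecs map, a name resolves to at most one codec and the lookup order cannot change the result. A sketch of what registration looks like from user code, assuming a toy codec name "demo" with placeholder payloads:

package demo

import "google.golang.org/grpc/encoding"

// legacyCodec is a toy []byte-based encoding.Codec; the payload below is a
// placeholder, not a real serialization scheme.
type legacyCodec struct{}

func (legacyCodec) Marshal(v any) ([]byte, error)      { return []byte("demo"), nil }
func (legacyCodec) Unmarshal(data []byte, v any) error { return nil }
func (legacyCodec) Name() string                       { return "demo" }

func init() {
	// Stored under "demo" in the shared map; getCodec would return it
	// wrapped in the V1 bridge. Registering a CodecV2 named "demo" later
	// would overwrite this entry entirely.
	encoding.RegisterCodec(legacyCodec{})
}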
+func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48a5..27c1b9bb63 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -92,7 +93,6 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -677,11 +677,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +758,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad7..e7b532b6f8 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d714..11d0ae142c 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 0000000000..074c5e234a --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. 
+	Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+	if codec == nil {
+		panic("cannot register a nil CodecV2")
+	}
+	if codec.Name() == "" {
+		panic("cannot register CodecV2 with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+	c, _ := registeredCodecs[contentSubtype].(CodecV2)
+	return c
+}
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66d5cdf03e..ceec319dd2 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
 	"fmt"
 
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/protoadapt"
 )
@@ -32,28 +33,51 @@ import (
 const Name = "proto"
 
 func init() {
-	encoding.RegisterCodec(codec{})
+	encoding.RegisterCodecV2(&codecV2{})
 }
 
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec
+// for gRPC.
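A minimal sketch of a user-defined CodecV2 built on the new mem.BufferSlice surface; the pass-through "raw" codec here is hypothetical, not part of this patch:

package rawcodec

import (
	"fmt"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

type rawCodec struct{}

func (rawCodec) Marshal(v any) (mem.BufferSlice, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("raw: want *[]byte, got %T", v)
	}
	// Wrap the caller's bytes without copying; a nil pool means no recycling.
	return mem.BufferSlice{mem.NewBuffer(b, nil)}, nil
}

func (rawCodec) Unmarshal(data mem.BufferSlice, v any) error {
	out, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("raw: want *[]byte, got %T", v)
	}
	// Materialize copies, since data is freed once Unmarshal returns.
	*out = data.Materialize()
	return nil
}

func (rawCodec) Name() string { return "raw" }

func init() { encoding.RegisterCodecV2(rawCodec{}) }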
+type codecV2 struct{}
 
-func (codec) Marshal(v any) ([]byte, error) {
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
 	vv := messageV2Of(v)
 	if vv == nil {
-		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
 	}
 
-	return proto.Marshal(vv)
+	size := proto.Size(vv)
+	if mem.IsBelowBufferPoolingThreshold(size) {
+		buf, err := proto.Marshal(vv)
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, mem.SliceBuffer(buf))
+	} else {
+		pool := mem.DefaultBufferPool()
+		buf := pool.Get(size)
+		if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+			pool.Put(buf)
+			return nil, err
+		}
+		data = append(data, mem.NewBuffer(buf, pool))
+	}
+
+	return data, nil
 }
 
-func (codec) Unmarshal(data []byte, v any) error {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
 	vv := messageV2Of(v)
 	if vv == nil {
 		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
 	}
 
-	return proto.Unmarshal(data, vv)
+	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+	defer buf.Free()
+	// TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+	// really possible without a major overhaul of the proto package, but the
+	// vtprotobuf library may be able to support this.
+	return proto.Unmarshal(buf.ReadOnlyData(), vv)
 }
 
 func messageV2Of(v any) proto.Message {
@@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
 	return nil
 }
 
-func (codec) Name() string {
+func (c *codecV2) Name() string {
 	return Name
 }
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 0000000000..1d827dd5d9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,269 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"maps"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through the global
+// metrics registry. This is written to at initialization time only, and is
+// read only after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+	// The name of this metric. This name must be unique across the whole binary
+	// (including any per call metrics). See
+	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+	// for metric naming conventions.
+	Name Metric
+	// The description of this metric.
+	Description string
+	// The unit (e.g. entries, seconds) of this metric.
+ Unit string + // The required label keys for this metric. These are intended to + // metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for a int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric. 
This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if MetricDescriptor not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. 
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	registeredMetrics = make(map[Metric]bool)
+	metricsRegistry = make(map[Metric]*MetricDescriptor)
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
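A sketch of the intended test-time flow (hypothetical test; it assumes the internal.SnapshotMetricRegistryForTesting hook, added later in this diff, is wired to the function above):

package stats_test

import (
	"testing"

	estats "google.golang.org/grpc/experimental/stats"
	"google.golang.org/grpc/internal"
)

func TestScratchMetric(t *testing.T) {
	// Swap in copies of the registry maps and restore the originals on
	// cleanup, so the scratch metric does not leak into other tests.
	cleanup := internal.SnapshotMetricRegistryForTesting()
	defer cleanup()

	estats.RegisterInt64Count(estats.MetricDescriptor{
		Name: "test.scratch.calls", // hypothetical name
	})
}
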
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 0000000000..3221f7a633
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from the metric registry.
+type MetricsRecorder interface {
+	// RecordInt64Count records the measurement alongside labels on the int
+	// count associated with the provided handle.
+	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+	// RecordFloat64Count records the measurement alongside labels on the float
+	// count associated with the provided handle.
+	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+	// RecordInt64Histo records the measurement alongside labels on the int
+	// histo associated with the provided handle.
+	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+	// RecordFloat64Histo records the measurement alongside labels on the float
+	// histo associated with the provided handle.
+	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+	// RecordInt64Gauge records the measurement alongside labels on the int
+	// gauge associated with the provided handle.
+	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the given metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+	return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	maps.Copy(newMetrics, m.metrics)
+	maps.Copy(newMetrics, metrics.metrics)
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		delete(newMetrics, metric)
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced2..f1ae080dcb 100644
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
 
 import (
 	"fmt"
-
-	"google.golang.org/grpc/internal/grpclog"
 )
 
 // componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
 
 func (c *componentData) InfoDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.InfoDepth(depth+1, args...)
+	InfoDepth(depth+1, args...)
 }
 
 func (c *componentData) WarningDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.WarningDepth(depth+1, args...)
+	WarningDepth(depth+1, args...)
 }
 
 func (c *componentData) ErrorDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.ErrorDepth(depth+1, args...)
+	ErrorDepth(depth+1, args...)
 }
 
 func (c *componentData) FatalDepth(depth int, args ...any) {
 	args = append([]any{"[" + string(c.name) + "]"}, args...)
-	grpclog.FatalDepth(depth+1, args...)
+	FatalDepth(depth+1, args...)
} func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb9..db320105e6 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. 
os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 0000000000..59c03bc14c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. 
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 0000000000..0d9a824ce1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+	Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(l int) bool {
+	// Returns true for all verbose levels.
+ return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102ab..07df71e98a 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. 
+ fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267..4b20358570 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. 
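For context, the exported constructors keep their existing behavior and now simply delegate to grpclog/internal. A sketch of installing a custom logger (the writers and verbosity below are arbitrary choices, not from the patch):

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Per NewLoggerV2's contract: error logs fan out to all three writers,
	// warnings to the warning and info writers, info to the info writer.
	// Behind the scenes this stores the logger in internal.LoggerV2Impl.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))
}

func main() {
	grpclog.Infof("verbose level 2 enabled: %v", grpclog.V(2))
}
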
-type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d713..892dc13d16 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
- // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. 
- b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 38b8835073..e65cf0ea15 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 51b736ba06..f96b8ab492 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -160,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. 
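The aliases keep existing callers compiling unchanged; new code can name the generic stream types directly. A sketch of a Watch consumer (hypothetical helper; assumes an already-dialed *grpc.ClientConn):

package healthwatch

import (
	"context"
	"log"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// watchHealth streams health updates until the RPC ends. The stream's type,
// grpc.ServerStreamingClient[healthpb.HealthCheckResponse], is exactly what
// the Health_WatchClient alias now names.
func watchHealth(ctx context.Context, cc *grpc.ClientConn) error {
	stream, err := healthpb.NewHealthClient(cc).Watch(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // the server closing the stream surfaces here
		}
		log.Printf("health status: %v", resp.GetStatus())
	}
}
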
+// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -182,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b0892..bb531225d5 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index d906487139..00abc7c2be 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -46,6 +46,10 @@ var ( // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". 
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e173..7617be2158 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de76..092ad187a2 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. 
-		format = pl.prefix + format
-		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
-		return
-	}
-	InfoDepth(1, fmt.Sprintf(format, args...))
-
+	grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
 }
 
 // V reports whether verbosity level l is at least the requested verbose level.
 func (pl *PrefixLogger) V(l int) bool {
-	// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
-	// rewrite PrefixLogger a little to ensure that we don't use the global
-	// `Logger` here, and instead use the `logger` field.
-	return Logger.V(l)
+	if pl != nil {
+		return pl.logger.V(l)
+	}
+	return true
 }
 
 // NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
 	return &PrefixLogger{logger: logger, prefix: prefix}
 }
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16ac..19b9d63927 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
 	return cs
 }
 
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
 //
 // Callbacks are expected to honor the context when performing any blocking
 // operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+	cs.callbacks.Put(f)
+}
+
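The split makes caller intent explicit. A sketch of choosing between the two modes (usable only inside the grpc module, since grpcsync is internal; the helper and notify callback are hypothetical):

package grpcsync

import (
	"context"
	"errors"
)

// enqueueWork contrasts the two scheduling modes.
func enqueueWork(cs *CallbackSerializer, notify func(error)) {
	// Best effort: silently dropped if the serializer's context is done.
	cs.TrySchedule(func(ctx context.Context) {
		// ...serialized, fire-and-forget work...
	})

	// Must observe failure: onFailure runs inline when scheduling fails.
	cs.ScheduleOr(func(ctx context.Context) {
		notify(nil)
	}, func() {
		notify(errors.New("callback serializer closed"))
	})
}

+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
 //
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
-	return cs.callbacks.Put(f) == nil
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.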
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1ab..6d8c2f518d 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5d66539869..73fa407b6c 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -208,6 +208,26 @@ var ( // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n // is the number of elements. swap swaps the elements with indexes i and j. ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go new file mode 100644 index 0000000000..fd33af51ae --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats provides internal stats related functionality. 
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+	// TelemetryLabels are the telemetry labels to record.
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// Note: this overwrites any Labels already stored in the context; an
+	// alternative would be to append.
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000000..be110d41f9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+	"fmt"
+
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics if the label values provided in a Record call do not match the
+// number of label keys declared on the metric.
+type MetricsRecorderList struct {
+	// metricsRecorders are the metrics recorders this list will forward to.
+	metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new MetricsRecorderList from the provided
+// stats handlers that implement the MetricsRecorder interface. If none of the
+// stats handlers do, the returned MetricsRecorderList is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+	var mrs []estats.MetricsRecorder
+	for _, sh := range shs {
+		if mr, ok := sh.(estats.MetricsRecorder); ok {
+			mrs = append(mrs, mr)
+		}
+	}
+	return &MetricsRecorderList{
+		metricsRecorders: mrs,
+	}
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+	if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+		panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Count(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ } +} + +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd..7e7aaa5463 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a890..d5c1085eea 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a2..ea0633bbda 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. 
-// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. 
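The trfChan field above is the interesting piece of this rewrite: it is an atomically swapped channel that throttle() blocks on and that the reader closes exactly once to release every waiter. A standalone sketch of that gate pattern, with illustrative names that are not grpc-go API:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// gate mirrors controlBuffer's trfChan: an atomically swapped channel that
// is closed once to release every blocked waiter.
type gate struct {
	ch atomic.Pointer[chan struct{}]
}

// arm installs a fresh channel; subsequent wait calls will block.
func (g *gate) arm() {
	ch := make(chan struct{})
	g.ch.Store(&ch)
}

// wait blocks until release is called, but only if the gate is armed.
func (g *gate) wait() {
	if ch := g.ch.Load(); ch != nil {
		<-(*ch)
	}
}

// release wakes all waiters; Swap(nil) guarantees the channel is
// closed at most once even if release races with another release.
func (g *gate) release() {
	if ch := g.ch.Swap(nil); ch != nil {
		close(*ch)
	}
}

func main() {
	var g gate
	g.arm()
	go func() { time.Sleep(10 * time.Millisecond); g.release() }()
	g.wait()
	fmt.Println("released")
}
```

Replacing the old atomic.Value with atomic.Pointer is what makes the close-and-clear sequence race-free: Swap both fetches and clears the channel in one step.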
 func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
-	var wakeUp bool
 	c.mu.Lock()
-	if c.err != nil {
-		c.mu.Unlock()
-		return false, c.err
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return false, ErrConnClosing
 	}
 	if f != nil {
 		if !f() { // f wasn't successful
-			c.mu.Unlock()
 			return false, nil
 		}
 	}
+	if it == nil {
+		return true, nil
+	}
+
+	var wakeUp bool
 	if c.consumerWaiting {
 		wakeUp = true
 		c.consumerWaiting = false
@@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
 		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
 			// We are adding the frame that puts us over the threshold; create
 			// a throttling channel.
-			c.trfChan.Store(make(chan struct{}))
+			ch := make(chan struct{})
+			c.trfChan.Store(&ch)
 		}
 	}
-	c.mu.Unlock()
 	if wakeUp {
 		select {
-		case c.ch <- struct{}{}:
+		case c.wakeupCh <- struct{}{}:
 		default:
 		}
 	}
 	return true, nil
 }
 
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
-	c.mu.Lock()
-	if c.err != nil {
-		c.mu.Unlock()
-		return false, c.err
-	}
-	if !f(it) { // f wasn't successful
-		c.mu.Unlock()
-		return false, nil
-	}
-	c.mu.Unlock()
-	return true, nil
-}
-
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
 func (c *controlBuffer) get(block bool) (any, error) {
 	for {
 		c.mu.Lock()
-		if c.err != nil {
+		frame, err := c.getOnceLocked()
+		if frame != nil || err != nil || !block {
+			// If we read a frame or an error, we can return to the caller. The
+			// call to getOnceLocked() returns a nil frame and a nil error if
+			// there is nothing to read, and in that case, if the caller asked
+			// us not to block, we can return now as well.
 			c.mu.Unlock()
-			return nil, c.err
-		}
-		if !c.list.isEmpty() {
-			h := c.list.dequeue().(cbItem)
-			if h.isTransportResponseFrame() {
-				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
-					// We are removing the frame that put us over the
-					// threshold; close and clear the throttling channel.
-					ch := c.trfChan.Load().(chan struct{})
-					close(ch)
-					c.trfChan.Store((chan struct{})(nil))
-				}
-				c.transportResponseFrames--
-			}
-			c.mu.Unlock()
-			return h, nil
-		}
-		if !block {
-			c.mu.Unlock()
-			return nil, nil
+			return frame, err
 		}
 		c.consumerWaiting = true
 		c.mu.Unlock()
+
+		// Release the lock above and wait to be woken up.
 		select {
-		case <-c.ch:
+		case <-c.wakeupCh:
 		case <-c.done:
 			return nil, errors.New("transport closed by client")
 		}
 	}
 }
 
+// Callers must not use this method directly; use get() instead.
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+	if c.closed {
+		return nil, ErrConnClosing
+	}
+	if c.list.isEmpty() {
+		return nil, nil
+	}
+	h := c.list.dequeue().(cbItem)
+	if h.isTransportResponseFrame() {
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are removing the frame that put us over the
+			// threshold; close and clear the throttling channel.
+			ch := c.trfChan.Swap(nil)
+			close(*ch)
+		}
+		c.transportResponseFrames--
+	}
+	return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
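executeAndPut and get coordinate through wakeupCh rather than a sync.Cond so that the parked consumer can also select on the transport's done channel. A minimal sketch of that mutex-plus-buffered-channel handoff, using a hypothetical queue type rather than the real controlbuf:

```go
package main

import (
	"fmt"
	"sync"
)

// queue sketches controlBuffer's wakeup scheme: a mutex-guarded list plus a
// capacity-1 channel that carries at most one pending wakeup.
type queue struct {
	mu       sync.Mutex
	items    []string
	waiting  bool
	wakeupCh chan struct{}
}

func (q *queue) put(s string) {
	q.mu.Lock()
	q.items = append(q.items, s)
	wake := q.waiting
	q.waiting = false
	q.mu.Unlock()
	if wake {
		select {
		case q.wakeupCh <- struct{}{}: // wake the parked consumer
		default: // a wakeup is already pending; dropping is safe
		}
	}
}

func (q *queue) get() string {
	for {
		q.mu.Lock()
		if len(q.items) > 0 {
			s := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			return s
		}
		q.waiting = true
		q.mu.Unlock()
		<-q.wakeupCh // park until put signals
	}
}

func main() {
	q := &queue{wakeupCh: make(chan struct{}, 1)}
	go q.put("frame")
	fmt.Println(q.get())
}
```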
func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. 
+ // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. 
+ _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a..e1cd86b2fc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c70698..f46194fdc6 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. 
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165b5..f5163f770c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. 
channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. 
if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd44..f609c6c665 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa103257..54b2244365 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. 
Checks if a proxy
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 4b39c0ade9..fdd6fa86cc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,7 +22,6 @@
 package transport
 
 import (
-	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -37,6 +36,7 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/resolver"
@@ -47,32 +47,10 @@ import (
 
 const logLevel = 2
 
-type bufferPool struct {
-	pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
-	return &bufferPool{
-		pool: sync.Pool{
-			New: func() any {
-				return new(bytes.Buffer)
-			},
-		},
-	}
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
-	return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
-	p.pool.Put(b)
-}
-
 // recvMsg represents the received msg from the transport. All transport
 // protocol specific info has been removed.
 type recvMsg struct {
-	buffer *bytes.Buffer
+	buffer mem.Buffer
 	// nil: received some data
 	// io.EOF: stream is completed. data is nil.
 	// other non-nil error: transport failure. data is nil.
@@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer {
 func (b *recvBuffer) put(r recvMsg) {
 	b.mu.Lock()
 	if b.err != nil {
+		// drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+		// will always return an error, making this buffer inaccessible.
+		r.buffer.Free()
 		b.mu.Unlock()
 		// An error had occurred earlier, don't accept more
 		// data or errors.
@@ -148,45 +129,97 @@ type recvBufferReader struct {
 	ctx     context.Context
 	ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
 	recv    *recvBuffer
-	last    *bytes.Buffer // Stores the remaining data in the previous calls.
+	last    mem.Buffer // Stores the remaining data in the previous calls.
 	err     error
-	freeBuffer func(*bytes.Buffer)
 }
 
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
 	if r.err != nil {
 		return 0, r.err
 	}
 	if r.last != nil {
-		// Read remaining data left in last call.
-		copied, _ := r.last.Read(p)
-		if r.last.Len() == 0 {
-			r.freeBuffer(r.last)
+		n, r.last = mem.ReadUnsafe(header, r.last)
+		return n, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readHeaderClient(header)
+	} else {
+		n, r.err = r.readHeader(header)
+	}
+	return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that
+// error.
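ReadHeader hands ownership through mem.ReadUnsafe: the source Buffer is consumed (and possibly freed) as it is read, so the caller must continue from the Buffer that ReadUnsafe returns. A small sketch against the mem package introduced later in this diff; the data values are illustrative only:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	data := []byte("hello world")
	buf := mem.NewBuffer(&data, nil) // nil pool: freeing is a no-op here

	hdr := make([]byte, 5)
	n, rest := mem.ReadUnsafe(hdr, buf)
	fmt.Println(n, string(hdr)) // 5 "hello"

	// The original Buffer must not be used again; continue from rest,
	// which references the unread " world" suffix.
	tail := make([]byte, rest.Len())
	n, rest = mem.ReadUnsafe(tail, rest)
	fmt.Println(n, string(tail), rest == nil) // 6 " world" true
}
```

When a read drains the buffer completely, ReadUnsafe releases the reference and returns a nil Buffer, which is why recvBufferReader stores the returned value back into r.last.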
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. 
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
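Together, ReadHeader and Read let a caller fill a fixed-size header from pooled buffers and then receive the payload as a mem.BufferSlice without a flattening copy. A hedged sketch of the call pattern for one length-prefixed gRPC message (5-byte header: 1 compression flag plus a 4-byte big-endian length). readOneMessage is hypothetical, transport is an internal package, and encoding/binary is assumed imported, so treat this as an illustration of the call pattern rather than grpc-go's actual framing code:

```go
// Hypothetical caller inside the transport package; gRPC's real framing
// lives in the rpc_util layer.
func readOneMessage(s *Stream) ([]byte, error) {
	var hdr [5]byte
	if err := s.ReadHeader(hdr[:]); err != nil {
		return nil, err
	}
	length := binary.BigEndian.Uint32(hdr[1:]) // hdr[0] is the compression flag

	data, err := s.Read(int(length)) // mem.BufferSlice, possibly several buffers
	if err != nil {
		return nil, err
	}
	defer data.Free()              // release the references taken during Read
	return data.Materialize(), nil // flatten only when contiguous bytes are required
}
```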
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err + } + t.windowHandler(len(header)) + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err } - t.windowHandler(n) - return + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
 	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
-	// errStreamDone is returned from write at the client side to indiacte application
+	// errStreamDone is returned from write at the client side to inform the application
 	// layer of an error.
 	errStreamDone = errors.New("the stream is done")
 	// StatusGoAway indicates that the server sent a GOAWAY that included this
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 0000000000..c37c58c023
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+	"sort"
+	"sync"
+
+	"google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+	// Get returns a buffer with specified length from the pool.
+	Get(length int) *[]byte
+
+	// Put returns a buffer to the pool.
+	Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+	256,
+	4 << 10,  // 4KB (go page size)
+	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+	32 << 10, // 32KB (default buffer size for io.Copy)
+	1 << 20,  // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+	defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+	internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+		defaultBufferPool = pool
+	}
+
+	internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+		bufferPoolingThreshold = threshold
+	}
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewTieredBufferPool that uses a set of default sizes optimized
+// for expected workflows.
+func DefaultBufferPool() BufferPool {
+	return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+	sort.Ints(poolSizes)
+	pools := make([]*sizedBufferPool, len(poolSizes))
+	for i, s := range poolSizes {
+		pools[i] = newSizedBufferPool(s)
+	}
+	return &tieredBufferPool{
+		sizedPools: pools,
+	}
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
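The tiered pool serves each Get from the smallest tier whose buffer size fits the request and falls back to a simple pool for requests larger than the biggest tier. A short usage sketch (the mem package is marked experimental, per its doc comment, so the API may change):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	buf := pool.Get(10 << 10) // a 10KB request is served from the 16KB tier
	fmt.Println(len(*buf), cap(*buf) >= 16<<10) // 10240 true

	copy(*buf, []byte("payload"))
	pool.Put(buf) // return it for reuse; do not touch *buf afterwards
}
```

Get and Put trade in *[]byte rather than []byte so that the slice header itself does not escape to the heap on every pool round trip.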
+type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 0000000000..228e9c2f20 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. +func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. 
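A typical round trip through this API: accumulate writes into a BufferSlice with NewWriter, then drain it through Reader, which takes its own references to each buffer. A sketch under those semantics, using the APIs defined in this file:

```go
package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	var slice mem.BufferSlice
	w := mem.NewWriter(&slice, mem.DefaultBufferPool())
	io.WriteString(w, "hello ") // each Write copies into a pooled Buffer
	io.WriteString(w, "world")

	r := slice.Reader() // takes its own references to every buffer
	defer r.Close()     // frees whatever the reader has not yet consumed

	out := make([]byte, r.Remaining())
	io.ReadFull(r, out)
	fmt.Println(string(out)) // "hello world"

	slice.Free() // release the writer's original references
}
```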
+func (s BufferSlice) Reader() Reader {
+	s.Ref()
+	return &sliceReader{
+		data: s,
+		len:  s.Len(),
+	}
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience
+// method Remaining(), which returns the number of unread bytes remaining in the
+// slice. Buffers will be freed as they are read.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+	// Close frees the underlying BufferSlice and never returns an error. Subsequent
+	// calls to Read will return (0, io.EOF).
+	Close() error
+	// Remaining returns the number of unread bytes remaining in the slice.
+	Remaining() int
+}
+
+type sliceReader struct {
+	data BufferSlice
+	len  int
+	// The index into data[0].ReadOnlyData().
+	bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+	return r.len
+}
+
+func (r *sliceReader) Close() error {
+	r.data.Free()
+	r.data = nil
+	r.len = 0
+	return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+		return false
+	}
+
+	r.data[0].Free()
+	r.data = r.data[1:]
+	r.bufferIdx = 0
+	return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	for len(buf) != 0 && r.len != 0 {
+		// Copy as much as possible from the first Buffer in the slice into the
+		// given byte slice.
+		data := r.data[0].ReadOnlyData()
+		copied := copy(buf, data[r.bufferIdx:])
+		r.len -= copied       // Reduce len by the number of bytes copied.
+		r.bufferIdx += copied // Increment the buffer index.
+		n += copied           // Increment the total number of bytes read.
+		buf = buf[copied:]    // Shrink the given byte slice.
+
+		// If we have copied all the data from the first Buffer, free it and advance to
+		// the next in the slice.
+		r.freeFirstBufferIfEmpty()
+	}
+
+	return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	// There may be any number of empty buffers in the slice, clear them all until a
+	// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+	for r.freeFirstBufferIfEmpty() {
+	}
+
+	b := r.data[0].ReadOnlyData()[r.bufferIdx]
+	r.len--
+	r.bufferIdx++
+	// Free the first buffer in the slice if the last byte was read.
+	r.freeFirstBufferIfEmpty()
+	return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+	buffers *BufferSlice
+	pool    BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+	b := Copy(p, w.pool)
+	*w.buffers = append(*w.buffers, b)
+	return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is added to
+// the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+	return &writer{buffers: buffers, pool: pool}
+}
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 0000000000..975ceb7185
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,252 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. + Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. 
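The reference-counting contract here: whoever shares a Buffer calls Ref, every owner calls Free, and the backing slice returns to the pool only when the last reference is released. Buffers at or below the 1KB pooling threshold skip refcounting entirely and come back as SliceBuffer, whose Free is a no-op. A sketch of the contract, with an illustrative process helper:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	payload := make([]byte, 4<<10) // 4KB: above the pooling threshold
	buf := mem.Copy(payload, pool) // refcount starts at 1

	buf.Ref() // refcount 2: hand one reference to another owner
	process(buf)

	fmt.Println(buf.Len()) // still valid: we hold the last reference
	buf.Free()             // refcount 0: backing slice returns to the pool
}

func process(buf mem.Buffer) {
	defer buf.Free() // this owner releases its reference
	_ = buf.ReadOnlyData()
}
```

Accessing ReadOnlyData after the final Free panics, which is why the transport code above is careful to Close readers and Free slices on every error path.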
+func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. +func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(n int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read(buf []byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd6e..d2e15253bb 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
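The hunk above only deletes the Experimental notice, promoting ValueFromIncomingContext to a stable API. Typical server-side usage looks like the sketch below; the "authorization" key is a hypothetical choice, not part of this change:

```go
package authutil

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// authToken extracts a bearer token from the incoming gRPC metadata.
// Keys are matched case-insensitively by ValueFromIncomingContext.
func authToken(ctx context.Context) (string, bool) {
	vals := metadata.ValueFromIncomingContext(ctx, "authorization")
	if len(vals) == 0 {
		return "", false
	}
	return vals[0], true
}
```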
func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd633643..e87a17f36a 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. @@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c2..0000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . 
-name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) - -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." 
\ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb45236f..23bb3fb258 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e91..db8865ec3f 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -271,17 +271,13 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. 
Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -515,11 +511,51 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(c *callInfo, attempt *csAttempt) {} + // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. // @@ -540,7 +576,7 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. 
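The ForceCodecV2 option added above accepts an encoding.CodecV2, whose Marshal returns a mem.BufferSlice instead of []byte. A sketch of a pass-through wrapper; the size logging is purely illustrative, and it assumes encoding.GetCodecV2 for codec lookup, as in this version of gRPC-Go:

```go
package codecs

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// sizeLoggingCodec wraps another CodecV2 and logs marshaled message sizes.
// Unmarshal and Name are provided by the embedded codec.
type sizeLoggingCodec struct {
	encoding.CodecV2
}

func (c sizeLoggingCodec) Marshal(v any) (mem.BufferSlice, error) {
	data, err := c.CodecV2.Marshal(v)
	if err == nil {
		log.Printf("codec %q marshaled %d bytes", c.Name(), data.Len())
	}
	return data, err
}

// CallOption returns the option to apply on a per-RPC basis.
func CallOption() grpc.CallOption {
	return grpc.ForceCodecV2(sizeLoggingCodec{encoding.GetCodecV2("proto")})
}
```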
@@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
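The rewritten compress below streams compressor output through mem.NewWriter rather than a bytes.Buffer. How the writer and reader adapters round-trip, as a standalone sketch (the payload string is arbitrary):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"google.golang.org/grpc/mem"
)

func main() {
	var out mem.BufferSlice
	w := mem.NewWriter(&out, mem.DefaultBufferPool())
	if _, err := io.Copy(w, strings.NewReader("hello gRPC")); err != nil {
		panic(err)
	}

	r := out.Reader() // takes its own reference on every buffer
	b, _ := io.ReadAll(r)
	_ = r.Close() // releases the reader's references

	fmt.Printf("%s (%d bytes)\n", b, out.Len())
	out.Free() // releases the writer's original references
}
```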
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
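msgHeader, updated below to take the payload format explicitly, writes the standard 5-byte gRPC frame: one format byte followed by a big-endian uint32 length. Reduced to its essentials:

```go
package framing

import "encoding/binary"

// frame prepends the 5-byte gRPC message header: the payload format byte
// (0 = uncompressed, 1 = compressed) followed by the big-endian length.
func frame(pf byte, payload []byte) []byte {
	hdr := make([]byte, 5)
	hdr[0] = pf
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}
```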
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792b..d1e1415a40 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
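The deleted RecvBufferPool option (continued below) gives way to the internal bufferPool option built on mem.BufferPool, whose Get returns a *[]byte so slices can round-trip through the pool. The removed nopBufferPool translates to the new interface roughly as follows (illustrative only; servers now default to mem.DefaultBufferPool()):

```go
package pools

// nopPool satisfies mem.BufferPool (Get(int) *[]byte, Put(*[]byte)) without
// any pooling, mirroring the removed nopBufferPool against the new interface.
type nopPool struct{}

func (nopPool) Get(length int) *[]byte {
	b := make([]byte, length)
	return &b
}

func (nopPool) Put(*[]byte) {}
```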
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e..0000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd6518..71195c4943 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8051ef5b51..bb2b2a216c 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. 
return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. 
+ onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. 
// This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
 		as.decompSet = true
 	}
-	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
-	if err != nil {
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
 		if err == io.EOF {
 			if statusErr := as.s.Status().Err(); statusErr != nil {
 				return statusErr
@@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
-	if err == nil {
-		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-	}
-	if err == io.EOF {
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
 		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
 	}
-	return toRPCErr(err)
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
 }
 
 func (as *addrConnStream) finish(err error) {
@@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 	}
 
 	// load hdr, payload, data
-	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
+
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > ss.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+	if payloadLen > ss.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
 	}
 	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
 		return toRPCErr(err)
 	}
+
 	if len(ss.binlogs) != 0 {
 		if !ss.serverHeaderBinlogged {
 			h, _ := ss.s.Header()
@@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 			}
 		}
 		sm := &binarylog.ServerMessage{
-			Message: data,
+			Message: data.Materialize(),
 		}
 		for _, binlog := range ss.binlogs {
 			binlog.Log(ss.ctx, sm)
@@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 	}
 	if len(ss.statsHandler) != 0 {
 		for _, sh := range ss.statsHandler {
-			sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+			sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
 		}
 	}
 	return nil
@@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	var payInfo *payloadInfo
 	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
-	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
 		if err == io.EOF {
 			if len(ss.binlogs) != 0 {
 				chc := &binarylog.ClientHalfClose{}
@@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	if len(ss.statsHandler) != 0 {
 		for _, sh := range ss.statsHandler {
 			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
-				RecvTime: time.Now(),
-				Payload:  m,
-				// TODO truncate large payload.
-				Data:             payInfo.uncompressedBytes,
-				Length:           len(payInfo.uncompressedBytes),
+				RecvTime:         time.Now(),
+				Payload:          m,
+				Length:           payInfo.uncompressedBytes.Len(),
 				WireLength:       payInfo.compressedLength + headerLen,
 				CompressedLength: payInfo.compressedLength,
 			})
@@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	}
 	if len(ss.binlogs) != 0 {
 		cm := &binarylog.ClientMessage{
-			Message: payInfo.uncompressedBytes,
+			Message: payInfo.uncompressedBytes.Materialize(),
 		}
 		for _, binlog := range ss.binlogs {
 			binlog.Log(ss.ctx, cm)
@@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
 	return Method(stream.Context())
 }
 
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned boolean indicates whether
+// compression was made and therefore whether the payload needs to be freed in
+// addition to the returned data. Freeing the payload if the returned boolean is
+// false can lead to undefined behavior.
-func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
 	if preparedMsg, ok := m.(*PreparedMsg); ok {
-		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+		return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
 	}
 	// The input interface is not a prepared msg.
 	// Marshal and Compress the data at this point
 	data, err = encode(codec, m)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, 0, err
 	}
-	compData, err := compress(data, cp, comp)
+	compData, pf, err := compress(data, cp, comp, pool)
 	if err != nil {
-		return nil, nil, nil, err
+		data.Free()
+		return nil, nil, nil, 0, err
 	}
-	hdr, payload = msgHeader(data, compData)
-	return hdr, payload, data, nil
+	hdr, payload = msgHeader(data, compData, pf)
+	return hdr, data, payload, pf, nil
 }
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index bafaef99be..7c70005d08 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.65.0"
+const Version = "1.66.2"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5b106a3f93..20eb809245 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -29,7 +29,7 @@ github.com/cenkalti/backoff/v4
 # github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/chai2010/gettext-go v1.0.2
+# github.com/chai2010/gettext-go v1.0.3
 ## explicit; go 1.14
 github.com/chai2010/gettext-go
 github.com/chai2010/gettext-go/mo
@@ -69,7 +69,7 @@ github.com/fsnotify/fsnotify/internal
 # github.com/fxamacker/cbor/v2 v2.7.0
 ## explicit; go 1.17
 github.com/fxamacker/cbor/v2
-# github.com/go-errors/errors v1.4.2
+# github.com/go-errors/errors v1.5.1
 ## explicit; go 1.14
 github.com/go-errors/errors
 # github.com/go-logr/logr v1.4.2
@@ -103,15 +103,16 @@ github.com/gogo/protobuf/gogoproto
 github.com/gogo/protobuf/proto
 github.com/gogo/protobuf/protoc-gen-gogo/descriptor
 github.com/gogo/protobuf/sortkeys
+# github.com/golang/glog v1.2.2
+## explicit; go 1.19
+github.com/golang/glog
+github.com/golang/glog/internal/logsink
+github.com/golang/glog/internal/stackdump
 # github.com/golang/protobuf v1.5.4
 ## explicit; go 1.17
 github.com/golang/protobuf/proto
-github.com/golang/protobuf/ptypes
-github.com/golang/protobuf/ptypes/any
-github.com/golang/protobuf/ptypes/duration
-github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/btree v1.0.1
-## explicit; go 1.12
+# github.com/google/btree v1.1.2
+## explicit; go 1.18
 github.com/google/btree
 # github.com/google/cel-go v0.22.0
 ## explicit; go 1.21.1
@@ -137,7 +138,7 @@ github.com/google/cel-go/interpreter
 github.com/google/cel-go/interpreter/functions
 github.com/google/cel-go/parser
 github.com/google/cel-go/parser/gen
-# github.com/google/gnostic-models v0.6.8
+# github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49
 ## explicit; go 1.18
 github.com/google/gnostic-models/compiler
 github.com/google/gnostic-models/extensions
@@ -165,8 +166,8 @@ github.com/google/shlex
 # github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
-# github.com/gorilla/websocket v1.5.1
-## explicit; go 1.20
+# github.com/gorilla/websocket v1.5.3
+## explicit; go 1.12
 github.com/gorilla/websocket
 # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
 ## explicit
@@ -200,6 +201,11 @@ github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/kubeflow/mpi-operator v0.6.0
 ## explicit; go 1.23
 github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1
+# github.com/kubeflow/spark-operator v0.0.0-00010101000000-000000000000 => github.com/everpeace/spark-operator v0.0.0-20250120104544-7f87e6564ab4
+## explicit; go 1.23.1
+github.com/kubeflow/spark-operator/api/v1beta2
+github.com/kubeflow/spark-operator/pkg/common
+github.com/kubeflow/spark-operator/pkg/util
 # github.com/kubeflow/training-operator v1.8.1
 ## explicit; go 1.22
 github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1
@@ -362,16 +368,17 @@ go.etcd.io/etcd/client/v3/credentials
 go.etcd.io/etcd/client/v3/internal/endpoint
 go.etcd.io/etcd/client/v3/internal/resolver
 go.etcd.io/etcd/client/v3/kubernetes
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0
 ## explicit; go 1.21
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
 ## explicit; go 1.21
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.28.0
+# go.opentelemetry.io/otel v1.29.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
@@ -386,7 +393,6 @@ go.opentelemetry.io/otel/semconv/internal
 go.opentelemetry.io/otel/semconv/v1.12.0
 go.opentelemetry.io/otel/semconv/v1.17.0
 go.opentelemetry.io/otel/semconv/v1.20.0
-go.opentelemetry.io/otel/semconv/v1.24.0
 go.opentelemetry.io/otel/semconv/v1.26.0
 # go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
 ## explicit; go 1.21
@@ -399,12 +405,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.28.0
+# go.opentelemetry.io/otel/metric v1.29.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.28.0
+# go.opentelemetry.io/otel/sdk v1.29.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
@@ -412,7 +418,7 @@ go.opentelemetry.io/otel/sdk/internal/env
 go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.28.0
+# go.opentelemetry.io/otel/trace v1.29.0
 ## explicit; go 1.21
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
@@ -556,19 +562,17 @@ golang.org/x/tools/internal/versions
 # gomodules.xyz/jsonpatch/v2 v2.4.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/genproto v0.0.0-20240528184218-531527333157
-## explicit; go 1.20
-# google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7
+# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/expr/v1alpha1
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.65.0
+# google.golang.org/grpc v1.66.2
 ## explicit; go 1.21
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -587,7 +591,9 @@ google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/gzip
 google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
 google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
@@ -610,11 +616,13 @@ google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
 google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
 google.golang.org/grpc/internal/transport/networktype
 google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
@@ -1665,3 +1673,4 @@ sigs.k8s.io/structured-merge-diff/v4/value
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
 sigs.k8s.io/yaml/goyaml.v3
+# github.com/kubeflow/spark-operator => github.com/everpeace/spark-operator v0.0.0-20250120104544-7f87e6564ab4
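
Editor's note on the vendored gRPC hunks above: grpc-go 1.66 moves message payloads from plain `[]byte` to reference-counted `mem.BufferSlice` values, which is why the binary-logging and stats hunks now call `Materialize()` (flatten the chunks into one contiguous `[]byte`) and `Free()` (release this reference so the backing buffers can return to the pool). A minimal sketch of that lifecycle, assuming only the public `google.golang.org/grpc/mem` API as vendored by this change (the example program itself is illustrative and not part of the patch):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// A BufferSlice is a sequence of reference-counted chunks that together
	// form one logical message, without copying into a single allocation.
	bs := mem.BufferSlice{
		mem.Copy([]byte("hello, "), pool),
		mem.Copy([]byte("world"), pool),
	}

	fmt.Println(bs.Len()) // 12: total bytes across all chunks

	// Materialize flattens the chunks into one contiguous []byte; this is
	// what the diff does before handing message bytes to the binary logger.
	fmt.Printf("%s\n", bs.Materialize())

	// Free drops this reference; once the last reference is released, the
	// backing storage returns to the pool (mirrors data.Free()/payload.Free()).
	bs.Free()
}
```

This also explains the `pf.isCompressed()` guard in the `SendMsg` hunks: when no compression is applied, `payload` refers to the same buffers as `data`, so freeing both would release them to the pool twice.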
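The `onSuccessCalled` bookkeeping in the `clientStream.SendMsg` hunk follows the same ownership discipline: `payload.Free` is handed to `bufferForRetryLocked` only when the op is actually buffered for retry, and otherwise the sender must free the payload itself. A stripped-down sketch of that hand-off pattern (`bufferOrFree` and its parameters are hypothetical names invented for illustration, not gRPC internals):

```go
package main

import "fmt"

// bufferOrFree passes ownership of free to the retry buffer via the
// onSuccess callback; if buffering never happens, the callback never
// runs and the caller frees the payload immediately instead.
func bufferOrFree(tryBuffer func(onSuccess func()), free func()) {
	buffered := false
	tryBuffer(func() { buffered = true })
	if !buffered {
		free() // buffering was skipped (e.g. retries disabled): free now
	}
}

func main() {
	// Simulate a committed RPC whose retry buffer no longer accepts ops:
	// tryBuffer returns without invoking onSuccess, so we free right away.
	bufferOrFree(
		func(onSuccess func()) { /* buffering skipped */ },
		func() { fmt.Println("payload freed by sender") },
	)
}
```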